/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows 7 32-bit, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing the SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with the PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *      https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
    /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
    /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
    /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
    /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
    /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

    /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif
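
/*
 * Example: building xxHash into a library under a symbol prefix
 * (a minimal sketch; the `mylib_` prefix is illustrative):
 * @code{.c}
 *    #define XXH_NAMESPACE mylib_
 *    #include "xxhash.h"
 *
 *    // Callers keep using the regular names; this header translates them,
 *    // so XXH32() here resolves to the exported symbol mylib_XXH32().
 * @endcode
 */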

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return `XXH_VERSION_NUMBER` of the invoked library.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
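
/*
 * Example: checking at runtime that the linked library matches this header
 * (a minimal sketch; mainly relevant with shared/dynamic linking):
 * @code{.c}
 *    #include <stdio.h>
 *    #include "xxhash.h"
 *
 *    void checkVersion(void)
 *    {
 *        // XXH_VERSION_NUMBER comes from the header; XXH_versionNumber()
 *        // comes from the library actually linked in.
 *        if (XXH_versionNumber() != XXH_VERSION_NUMBER)
 *            fprintf(stderr, "warning: header and library versions differ\n");
 *    }
 * @endcode
 */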


/* ****************************
*  Common basic types
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;

#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
#     if ULONG_MAX == 0xFFFFFFFFUL
        typedef unsigned long XXH32_hash_t;
#     else
#       error "unsupported platform: need a 32-bit type"
#     endif
#   endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that @ref xxh3_family provides competitive speed
 *   for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
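
/*
 * Example: one-shot hashing of an in-memory buffer (a minimal sketch;
 * the data and seed below are illustrative):
 * @code{.c}
 *    #include <string.h>
 *    #include "xxhash.h"
 *
 *    XXH32_hash_t hashString(void)
 *    {
 *        const char data[] = "hello, xxhash";
 *        XXH32_hash_t const seed = 0;   // any 32-bit value works
 *        // Hash the bytes of the string, excluding the terminating NUL
 *        return XXH32(data, strlen(data), seed);
 *    }
 * @endcode
 */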

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bits hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *    #include <stdio.h>
 *    #include <assert.h>
 *    #include <xxhash.h>
 *    #define BUFFER_SIZE 256
 *
 *    // Note: XXH64 and XXH3 use the same interface.
 *    XXH32_hash_t
 *    hashFile(FILE* stream)
 *    {
 *        XXH32_state_t* state;
 *        unsigned char buf[BUFFER_SIZE];
 *        size_t amt;
 *        XXH32_hash_t hash;
 *
 *        state = XXH32_createState();       // Create a state
 *        assert(state != NULL);             // Error check here
 *        XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *        while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *            XXH32_update(state, buf, amt); // Hash the file in chunks
 *        }
 *        hash = XXH32_digest(state);        // Finalize the hash
 *        XXH32_freeState(state);            // Clean up
 *        return hash;
 *    }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *  @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte-level ordering,
 * since little-endian and big-endian conventions store the same number differently.
 *
 * The canonical representation settles this issue by mandating the big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
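
/*
 * Example: round-tripping a hash through its canonical (big-endian) form,
 * e.g. before writing it to disk or a network socket (a minimal sketch):
 * @code{.c}
 *    #include <assert.h>
 *    #include "xxhash.h"
 *
 *    void roundTrip(XXH32_hash_t hash)
 *    {
 *        XXH32_canonical_t canonical;
 *        XXH32_canonicalFromHash(&canonical, hash);
 *        // canonical.digest now holds 4 bytes in big-endian order,
 *        // safe to store or transmit across platforms.
 *        assert(XXH32_hashFromCanonical(&canonical) == hash);
 *    }
 * @endcode
 */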


#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
 * Define the XXH_FALLTHROUGH macro for annotating switch cases with the
 * 'fallthrough' attribute introduced in C++17 and C23.
 * C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
# define XXH_FALLTHROUGH
#endif

/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);

/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * 2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require either.
 * Any 32-bit or 64-bit target that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * The XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
*  XXH3 64-bit variant
************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly,
 * based on the default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
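
/*
 * Example: one-shot XXH3 hashing, with and without a seed (a minimal sketch;
 * the data and seed below are illustrative):
 * @code{.c}
 *    #include <string.h>
 *    #include "xxhash.h"
 *
 *    void hashBoth(void)
 *    {
 *        const char data[] = "sample input";
 *        size_t const len = strlen(data);
 *
 *        XXH64_hash_t const h1 = XXH3_64bits(data, len);
 *        XXH64_hash_t const h2 = XXH3_64bits_withSeed(data, len, 0);
 *        // A seed of 0 selects the default secret, so h1 == h2 here.
 *        (void)h1; (void)h2;
 *    }
 * @endcode
 */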

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing "XXH3_generateSecret()" instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
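
/*
 * Example: deriving a proper secret from arbitrary bytes with
 * XXH3_generateSecret() (an experimental function, declared further below in
 * the static-linking-only section), then hashing with it (a minimal sketch;
 * sizes and content are illustrative):
 * @code{.c}
 *    #include <string.h>
 *    #define XXH_STATIC_LINKING_ONLY   // needed for XXH3_generateSecret()
 *    #include "xxhash.h"
 *
 *    XXH64_hash_t hashWithSecret(const void* data, size_t len)
 *    {
 *        unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *        const char material[] = "any content, even low entropy";
 *        // Derive a high-entropy secret from arbitrary seed material
 *        if (XXH3_generateSecret(secret, sizeof(secret), material, strlen(material)) != XXH_OK)
 *            return 0;   // illustrative error handling
 *        return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *    }
 * @endcode
 */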


/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * The digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * The digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, and it _must outlive_ the hash streaming session.
 * Similar to the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);
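
/*
 * Example: streaming XXH3, hashing data in several chunks (a minimal sketch;
 * the two-part chunking below is illustrative):
 * @code{.c}
 *    #include <assert.h>
 *    #include <string.h>
 *    #include "xxhash.h"
 *
 *    XXH64_hash_t hashInTwoParts(const char* part1, const char* part2)
 *    {
 *        XXH3_state_t* const state = XXH3_createState();
 *        XXH64_hash_t hash;
 *        assert(state != NULL);
 *
 *        XXH3_64bits_reset(state);                        // default parameters
 *        XXH3_64bits_update(state, part1, strlen(part1)); // feed first chunk
 *        XXH3_64bits_update(state, part2, strlen(part2)); // feed second chunk
 *        hash = XXH3_64bits_digest(state);                // equivalent to one-shot XXH3_64bits()
 *        XXH3_freeState(state);
 *        return hash;
 *    }
 * @endcode
 */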

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use the already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
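
/*
 * Example: sorting an array of 128-bit hashes with the stdlib, using
 * XXH128_cmp() as the comparator (a minimal sketch):
 * @code{.c}
 *    #include <stdlib.h>
 *    #include "xxhash.h"
 *
 *    void sortHashes(XXH128_hash_t* hashes, size_t count)
 *    {
 *        // XXH128_cmp() matches the qsort()/bsearch() comparator signature
 *        qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 *    }
 * @endcode
 */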


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway. */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH64_state_t */

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64b_update(), XXH3_128b_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
 * Otherwise it is an opaque type.
 * Never use this definition in combination with a dynamic library.
 * This allows fields to safely be changed in the future.
 *
 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
 * Do not allocate this with `malloc()` or `new`,
 * it will not be sufficiently aligned.
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is simply allocated on the stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses the default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation;
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
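
/*
 * Example: stack-allocating an XXH3 state instead of using the heap
 * (a minimal sketch; requires XXH_STATIC_LINKING_ONLY for the struct definition):
 * @code{.c}
 *    #define XXH_STATIC_LINKING_ONLY
 *    #include "xxhash.h"
 *
 *    XXH64_hash_t hashOnStack(const void* data, size_t len, XXH64_hash_t seed)
 *    {
 *        XXH3_state_t state;       // 64-byte aligned by its definition
 *        XXH3_INITSTATE(&state);   // required before a first reset_withSeed
 *        XXH3_64bits_reset_withSeed(&state, seed);
 *        XXH3_64bits_update(&state, data, len);
 *        return XXH3_64bits_digest(&state);
 *    }
 * @endcode
 */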


/* XXH128() :
 * simple alias to pre-selected XXH3_128bits variant
 */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @secretSize
 * into an already allocated buffer @secretBuffer.
 * @secretSize must be >= XXH3_SECRET_SIZE_MIN
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * XXH3_generateSecret() can be employed to ensure proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all required qualities.
 *
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);


/*
 * XXH3_generateSecret_fromSeed():
 *
 * Generate the same secret as the _withSeed() variants.
 *
 * The resulting secret always has a length of XXH3_SECRET_DEFAULT_SIZE.
 * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 * This generator is notably useful in combination with `_withSecretandSeed()`,
 * as a way to emulate a faster `_withSeed()` variant.
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);

/*
 * *_withSecretandSeed() :
 * These variants generate hash values using either
 * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
 * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than the _withSeed() variants.
 * Therefore, the _withSecretandSeed variant combines the best of both worlds.
 *
 * When @secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" input,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to flip each bit in the output,
 * via its impact on the seed.
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
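
/*
 * Example: emulating a faster `_withSeed()` by generating the seed's secret
 * once, then reusing it via _withSecretandSeed() (a minimal sketch; the
 * static one-time init is illustrative and not thread-safe):
 * @code{.c}
 *    #define XXH_STATIC_LINKING_ONLY
 *    #include "xxhash.h"
 *
 *    XXH64_hash_t hashManyWithSeed(const void* data, size_t len, XXH64_hash_t seed)
 *    {
 *        static unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *        static int initialized = 0;
 *        if (!initialized) {   // generate the seed's secret only once
 *            XXH3_generateSecret_fromSeed(secret, seed);
 *            initialized = 1;
 *        }
 *        // Produces exactly the same result as XXH3_64bits_withSeed(),
 *        // without regenerating the secret for every large input.
 *        return XXH3_64bits_withSecretandSeed(data, len, secret, sizeof(secret), seed);
 *    }
 * @endcode
 */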


#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * However, inlining requires the implementation to be visible to the
 * compiler, hence be included alongside the header.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
1285  * The below switch allow selection of a different access method
1286  * in the search for improved performance.
1287  *
1288  * @par Possible options:
1289  *
1290  *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
1291  *   @par
1292  *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
1293  *     eliminate the function call and treat it as an unaligned access.
1294  *
1295  *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
1296  *   @par
1297  *     Depends on compiler extensions and is therefore not portable.
1298  *     This method is safe _if_ your compiler supports it,
1299  *     and *generally* as fast or faster than `memcpy`.
1300  *
1301  *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
1302  *  @par
1303  *     Casts directly and dereferences. This method doesn't depend on the
1304  *     compiler, but it violates the C standard as it directly dereferences an
1305  *     unaligned pointer. It can generate buggy code on targets which do not
1306  *     support unaligned memory accesses, but in some circumstances, it's the
1307  *     only known way to get the most performance.
1308  *
1309  *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
1310  *  @par
1311  *     Also portable. This can generate the best code on old compilers which don't
1312  *     inline small `memcpy()` calls, and it might also be faster on big-endian
1313  *     systems which lack a native byteswap instruction. However, some compilers
1314  *     will emit literal byteshifts even if the target supports unaligned access.
1315  *  .
1316  *
1317  * @warning
1318  *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
1319  *   care, as what works on one compiler/platform/optimization level may cause
1320  *   another to read garbage data or even crash.
1321  *
1322  * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
1323  *
1324  * Prefer these methods in priority order (0 > 3 > 1 > 2)
1325  */
1326 #  define XXH_FORCE_MEMORY_ACCESS 0
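/*
 * A minimal sketch of how a build might select a non-default method
 * (values and mechanism as documented above; the compiler invocation is
 * hypothetical):
 *
 *   cc -DXXH_FORCE_MEMORY_ACCESS=3 -c myfile.c
 *
 * or, equivalently, before including this header:
 *
 *   #define XXH_FORCE_MEMORY_ACCESS 3
 *   #include "xxhash.h"
 */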
1327 
1328 /*!
1329  * @def XXH_FORCE_ALIGN_CHECK
1330  * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
1331  * and XXH64() only).
1332  *
1333  * This is an important performance trick for architectures without decent
1334  * unaligned memory access performance.
1335  *
1336  * It checks for input alignment, and when conditions are met, uses a "fast
1337  * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
1338  * faster_ read speed.
1339  *
1340  * The check costs one initial branch per hash, which is generally negligible,
1341  * but not zero.
1342  *
1343  * Moreover, it's not useful to generate an additional code path if memory
1344  * access uses the same instruction for both aligned and unaligned
1345  * addresses (e.g. x86 and aarch64).
1346  *
1347  * In these cases, the alignment check can be removed by setting this macro to 0.
1348  * Then the code will always use unaligned memory access.
1349  * The alignment check is automatically disabled on x86, x64 & arm64,
1350  * platforms which are known to offer good unaligned memory access performance.
1351  *
1352  * This option does not affect XXH3 (only XXH32 and XXH64).
1353  */
1354 #  define XXH_FORCE_ALIGN_CHECK 0
1355 
1356 /*!
1357  * @def XXH_NO_INLINE_HINTS
1358  * @brief When non-zero, sets all functions to `static`.
1359  *
1360  * By default, xxHash tries to force the compiler to inline almost all internal
1361  * functions.
1362  *
1363  * This can usually improve performance due to reduced jumping and improved
1364  * constant folding, but it significantly increases the size of the binary,
1365  * which might not be favorable.
1366  *
1367  * Additionally, sometimes the forced inlining can be detrimental to performance,
1368  * depending on the architecture.
1369  *
1370  * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
1371  * compiler full control on whether to inline or not.
1372  *
1373  * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
1374  * -fno-inline with GCC or Clang, this will automatically be defined.
1375  */
1376 #  define XXH_NO_INLINE_HINTS 0
1377 
1378 /*!
1379  * @def XXH3_INLINE_SECRET
1380  * @brief Determines whether to inline the XXH3 withSecret code.
1381  *
1382  * When the secret size is known, the compiler can improve the performance
1383  * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
1384  *
1385  * However, if the secret size is not known, it doesn't have any benefit. This
1386  * happens when xxHash is compiled into a global symbol. Therefore, if
1387  * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
1388  *
1389  * Additionally, this defaults to 0 on GCC 12+, which has an issue with function
1390  * pointers that are *sometimes* force inlined on -Og, an optimization level
1391  * that is impossible to detect automatically.
1392  */
1393 #  define XXH3_INLINE_SECRET 0
1394 
1395 /*!
1396  * @def XXH32_ENDJMP
1397  * @brief Whether to use a jump for `XXH32_finalize`.
1398  *
1399  * For performance, `XXH32_finalize` uses multiple branches in the finalizer,
1400  * which is generally the faster option,
1401  * but depending on the exact architecture, a jmp may be preferable.
1402  *
1403  * This setting is likely to make a difference only for very small inputs.
1404  */
1405 #  define XXH32_ENDJMP 0
1406 
1407 /*!
1408  * @internal
1409  * @brief Redefines old internal names.
1410  *
1411  * For compatibility with code that used xxHash's internals before the names
1412  * were changed to improve namespacing. There is no other reason to use this.
1413  */
1414 #  define XXH_OLD_NAMES
1415 #  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
1416 #endif /* XXH_DOXYGEN */
1417 /*!
1418  * @}
1419  */
1420 
1421 #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
1422    /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
1423 #  if !defined(__clang__) && \
1424 ( \
1425     (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
1426     ( \
1427         defined(__GNUC__) && ( \
1428             (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
1429             ( \
1430                 defined(__mips__) && \
1431                 (__mips <= 5 || __mips_isa_rev < 6) && \
1432                 (!defined(__mips16) || defined(__mips_mips16e2)) \
1433             ) \
1434         ) \
1435     ) \
1436 )
1437 #    define XXH_FORCE_MEMORY_ACCESS 1
1438 #  endif
1439 #endif
1440 
1441 #ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
1442 #  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
1443    || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
1444 #    define XXH_FORCE_ALIGN_CHECK 0
1445 #  else
1446 #    define XXH_FORCE_ALIGN_CHECK 1
1447 #  endif
1448 #endif
1449 
1450 #ifndef XXH_NO_INLINE_HINTS
1451 #  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
1452    || defined(__NO_INLINE__)     /* -O0, -fno-inline */
1453 #    define XXH_NO_INLINE_HINTS 1
1454 #  else
1455 #    define XXH_NO_INLINE_HINTS 0
1456 #  endif
1457 #endif
1458 
1459 #ifndef XXH3_INLINE_SECRET
1460 #  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
1461      || !defined(XXH_INLINE_ALL)
1462 #    define XXH3_INLINE_SECRET 0
1463 #  else
1464 #    define XXH3_INLINE_SECRET 1
1465 #  endif
1466 #endif
1467 
1468 #ifndef XXH32_ENDJMP
1469 /* generally preferable for performance */
1470 #  define XXH32_ENDJMP 0
1471 #endif
1472 
1473 /*!
1474  * @defgroup impl Implementation
1475  * @{
1476  */
1477 
1478 
1479 /* *************************************
1480 *  Includes & Memory related functions
1481 ***************************************/
1482 /*
1483  * Modify the local functions below should you wish to use
1484  * different memory routines for malloc() and free()
1485  */
1486 #include <stdlib.h>
1487 
1488 /*!
1489  * @internal
1490  * @brief Modify this function to use a different routine than malloc().
1491  */
1492 static void* XXH_malloc(size_t s) { return malloc(s); }
1493 
1494 /*!
1495  * @internal
1496  * @brief Modify this function to use a different routine than free().
1497  */
1498 static void XXH_free(void* p) { free(p); }
1499 
1500 #include <string.h>
1501 
1502 /*!
1503  * @internal
1504  * @brief Modify this function to use a different routine than memcpy().
1505  */
1506 static void* XXH_memcpy(void* dest, const void* src, size_t size)
1507 {
1508     return memcpy(dest,src,size);
1509 }
1510 
1511 #include <limits.h>   /* ULLONG_MAX */
1512 
1513 
1514 /* *************************************
1515 *  Compiler Specific Options
1516 ***************************************/
1517 #ifdef _MSC_VER /* Visual Studio warning fix */
1518 #  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1519 #endif
1520 
1521 #if XXH_NO_INLINE_HINTS  /* disable inlining hints */
1522 #  if defined(__GNUC__) || defined(__clang__)
1523 #    define XXH_FORCE_INLINE static __attribute__((unused))
1524 #  else
1525 #    define XXH_FORCE_INLINE static
1526 #  endif
1527 #  define XXH_NO_INLINE static
1528 /* enable inlining hints */
1529 #elif defined(__GNUC__) || defined(__clang__)
1530 #  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1531 #  define XXH_NO_INLINE static __attribute__((noinline))
1532 #elif defined(_MSC_VER)  /* Visual Studio */
1533 #  define XXH_FORCE_INLINE static __forceinline
1534 #  define XXH_NO_INLINE static __declspec(noinline)
1535 #elif defined (__cplusplus) \
1536   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
1537 #  define XXH_FORCE_INLINE static inline
1538 #  define XXH_NO_INLINE static
1539 #else
1540 #  define XXH_FORCE_INLINE static
1541 #  define XXH_NO_INLINE static
1542 #endif
1543 
1544 #if XXH3_INLINE_SECRET
1545 #  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
1546 #else
1547 #  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
1548 #endif
1549 
1550 
1551 /* *************************************
1552 *  Debug
1553 ***************************************/
1554 /*!
1555  * @ingroup tuning
1556  * @def XXH_DEBUGLEVEL
1557  * @brief Sets the debugging level.
1558  *
1559  * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
1560  * compiler's command line options. The value must be a number.
1561  */
1562 #ifndef XXH_DEBUGLEVEL
1563 #  ifdef DEBUGLEVEL /* backwards compat */
1564 #    define XXH_DEBUGLEVEL DEBUGLEVEL
1565 #  else
1566 #    define XXH_DEBUGLEVEL 0
1567 #  endif
1568 #endif
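/*
 * For example, assertions can be enabled at build time (hypothetical
 * compiler invocation):
 *
 *   cc -DXXH_DEBUGLEVEL=1 -c xxhash.c
 */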
1569 
1570 #if (XXH_DEBUGLEVEL>=1)
1571 #  include <assert.h>   /* note: can still be disabled with NDEBUG */
1572 #  define XXH_ASSERT(c)   assert(c)
1573 #else
1574 #  define XXH_ASSERT(c)   ((void)0)
1575 #endif
1576 
1577 /* note: use after variable declarations */
1578 #ifndef XXH_STATIC_ASSERT
1579 #  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
1580 #    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
1581 #  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
1582 #    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1583 #  else
1584 #    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
1585 #  endif
1586 #  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
1587 #endif
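/*
 * Usage sketch (illustrative): per the note above, XXH_STATIC_ASSERT must
 * appear after variable declarations, because the pre-C11 fallback expands
 * to a local struct declaration wrapped in a statement:
 *
 *   void check(void)
 *   {
 *       int dummy = 0;
 *       XXH_STATIC_ASSERT(sizeof(char) == 1);
 *       (void)dummy;
 *   }
 */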
1588 
1589 /*!
1590  * @internal
1591  * @def XXH_COMPILER_GUARD(var)
1592  * @brief Used to prevent unwanted optimizations for @p var.
1593  *
1594  * It uses an empty GCC inline assembly statement with a register constraint
1595  * which forces @p var into a general purpose register (eg eax, ebx, ecx
1596  * on x86) and marks it as modified.
1597  *
1598  * This is used in a few places to avoid unwanted autovectorization (e.g.
1599  * XXH32_round()). All vectorization we want is explicit via intrinsics,
1600  * and _usually_ isn't wanted elsewhere.
1601  *
1602  * We also use it to prevent unwanted constant folding for AArch64 in
1603  * XXH3_initCustomSecret_scalar().
1604  */
1605 #if defined(__GNUC__) || defined(__clang__)
1606 #  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
1607 #else
1608 #  define XXH_COMPILER_GUARD(var) ((void)0)
1609 #endif
1610 
1611 /* *************************************
1612 *  Basic Types
1613 ***************************************/
1614 #if !defined (__VMS) \
1615  && (defined (__cplusplus) \
1616  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
1617 # include <stdint.h>
1618   typedef uint8_t xxh_u8;
1619 #else
1620   typedef unsigned char xxh_u8;
1621 #endif
1622 typedef XXH32_hash_t xxh_u32;
1623 
1624 #ifdef XXH_OLD_NAMES
1625 #  define BYTE xxh_u8
1626 #  define U8   xxh_u8
1627 #  define U32  xxh_u32
1628 #endif
1629 
1630 /* ***   Memory access   *** */
1631 
1632 /*!
1633  * @internal
1634  * @fn xxh_u32 XXH_read32(const void* ptr)
1635  * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
1636  *
1637  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1638  *
1639  * @param ptr The pointer to read from.
1640  * @return The 32-bit native endian integer from the bytes at @p ptr.
1641  */
1642 
1643 /*!
1644  * @internal
1645  * @fn xxh_u32 XXH_readLE32(const void* ptr)
1646  * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
1647  *
1648  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1649  *
1650  * @param ptr The pointer to read from.
1651  * @return The 32-bit little endian integer from the bytes at @p ptr.
1652  */
1653 
1654 /*!
1655  * @internal
1656  * @fn xxh_u32 XXH_readBE32(const void* ptr)
1657  * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
1658  *
1659  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1660  *
1661  * @param ptr The pointer to read from.
1662  * @return The 32-bit big endian integer from the bytes at @p ptr.
1663  */
1664 
1665 /*!
1666  * @internal
1667  * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1668  * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
1669  *
1670  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1671  * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
1672  * always @ref XXH_alignment::XXH_unaligned.
1673  *
1674  * @param ptr The pointer to read from.
1675  * @param align Whether @p ptr is aligned.
1676  * @pre
1677  *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
1678  *   aligned.
1679  * @return The 32-bit little endian integer from the bytes at @p ptr.
1680  */
1681 
1682 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1683 /*
1684  * Manual byteshift. Best for old compilers which don't inline memcpy.
1685  * We actually directly use XXH_readLE32 and XXH_readBE32.
1686  */
1687 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1688 
1689 /*
1690  * Force direct memory access. Only works on CPU which support unaligned memory
1691  * access in hardware.
1692  */
1693 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
1694 
1695 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1696 
1697 /*
1698  * __pack instructions are safer but compiler specific, hence potentially
1699  * problematic for some compilers.
1700  *
1701  * Currently only defined for GCC and ICC.
1702  */
1703 #ifdef XXH_OLD_NAMES
1704 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
1705 #endif
1706 static xxh_u32 XXH_read32(const void* ptr)
1707 {
1708     typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
1709     return ((const xxh_unalign*)ptr)->u32;
1710 }
1711 
1712 #else
1713 
1714 /*
1715  * Portable and safe solution. Generally efficient.
1716  * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
1717  */
1718 static xxh_u32 XXH_read32(const void* memPtr)
1719 {
1720     xxh_u32 val;
1721     XXH_memcpy(&val, memPtr, sizeof(val));
1722     return val;
1723 }
1724 
1725 #endif   /* XXH_FORCE_MEMORY_ACCESS */
1726 
1727 
1728 /* ***   Endianness   *** */
1729 
1730 /*!
1731  * @ingroup tuning
1732  * @def XXH_CPU_LITTLE_ENDIAN
1733  * @brief Whether the target is little endian.
1734  *
1735  * Defined to 1 if the target is little endian, or 0 if it is big endian.
1736  * It can be defined externally, for example on the compiler command line.
1737  *
1738  * If it is not defined,
1739  * a runtime check (which is usually constant folded) is used instead.
1740  *
1741  * @note
1742  *   This is not necessarily defined to an integer constant.
1743  *
1744  * @see XXH_isLittleEndian() for the runtime check.
1745  */
1746 #ifndef XXH_CPU_LITTLE_ENDIAN
1747 /*
1748  * Try to detect endianness automatically, to avoid the nonstandard behavior
1749  * in `XXH_isLittleEndian()`
1750  */
1751 #  if defined(_WIN32) /* Windows is always little endian */ \
1752      || defined(__LITTLE_ENDIAN__) \
1753      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1754 #    define XXH_CPU_LITTLE_ENDIAN 1
1755 #  elif defined(__BIG_ENDIAN__) \
1756      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
1757 #    define XXH_CPU_LITTLE_ENDIAN 0
1758 #  else
1759 /*!
1760  * @internal
1761  * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
1762  *
1763  * Most compilers will constant fold this.
1764  */
1765 static int XXH_isLittleEndian(void)
1766 {
1767     /*
1768      * Portable and well-defined behavior.
1769      * Don't use static: it is detrimental to performance.
1770      */
1771     const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
1772     return one.c[0];
1773 }
1774 #   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
1775 #  endif
1776 #endif
1777 
1778 
1779 
1780 
1781 /* ****************************************
1782 *  Compiler-specific Functions and Macros
1783 ******************************************/
1784 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
1785 
1786 #ifdef __has_builtin
1787 #  define XXH_HAS_BUILTIN(x) __has_builtin(x)
1788 #else
1789 #  define XXH_HAS_BUILTIN(x) 0
1790 #endif
1791 
1792 /*!
1793  * @internal
1794  * @def XXH_rotl32(x,r)
1795  * @brief 32-bit rotate left.
1796  *
1797  * @param x The 32-bit integer to be rotated.
1798  * @param r The number of bits to rotate.
1799  * @pre
1800  *   @p r > 0 && @p r < 32
1801  * @note
1802  *   @p x and @p r may be evaluated multiple times.
1803  * @return The rotated result.
1804  */
1805 #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
1806                                && XXH_HAS_BUILTIN(__builtin_rotateleft64)
1807 #  define XXH_rotl32 __builtin_rotateleft32
1808 #  define XXH_rotl64 __builtin_rotateleft64
1809 /* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
1810 #elif defined(_MSC_VER)
1811 #  define XXH_rotl32(x,r) _rotl(x,r)
1812 #  define XXH_rotl64(x,r) _rotl64(x,r)
1813 #else
1814 #  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
1815 #  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
1816 #endif
1817 
1818 /*!
1819  * @internal
1820  * @fn xxh_u32 XXH_swap32(xxh_u32 x)
1821  * @brief A 32-bit byteswap.
1822  *
1823  * @param x The 32-bit integer to byteswap.
1824  * @return @p x, byteswapped.
1825  */
1826 #if defined(_MSC_VER)     /* Visual Studio */
1827 #  define XXH_swap32 _byteswap_ulong
1828 #elif XXH_GCC_VERSION >= 403
1829 #  define XXH_swap32 __builtin_bswap32
1830 #else
1831 static xxh_u32 XXH_swap32 (xxh_u32 x)
1832 {
1833     return  ((x << 24) & 0xff000000 ) |
1834             ((x <<  8) & 0x00ff0000 ) |
1835             ((x >>  8) & 0x0000ff00 ) |
1836             ((x >> 24) & 0x000000ff );
1837 }
1838 #endif
1839 
1840 
1841 /* ***************************
1842 *  Memory reads
1843 *****************************/
1844 
1845 /*!
1846  * @internal
1847  * @brief Enum to indicate whether a pointer is aligned.
1848  */
1849 typedef enum {
1850     XXH_aligned,  /*!< Aligned */
1851     XXH_unaligned /*!< Possibly unaligned */
1852 } XXH_alignment;
1853 
1854 /*
1855  * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
1856  *
1857  * This is ideal for older compilers which don't inline memcpy.
1858  */
1859 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1860 
1861 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
1862 {
1863     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1864     return bytePtr[0]
1865          | ((xxh_u32)bytePtr[1] << 8)
1866          | ((xxh_u32)bytePtr[2] << 16)
1867          | ((xxh_u32)bytePtr[3] << 24);
1868 }
1869 
1870 XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
1871 {
1872     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1873     return bytePtr[3]
1874          | ((xxh_u32)bytePtr[2] << 8)
1875          | ((xxh_u32)bytePtr[1] << 16)
1876          | ((xxh_u32)bytePtr[0] << 24);
1877 }
1878 
1879 #else
1880 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
1881 {
1882     return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
1883 }
1884 
1885 static xxh_u32 XXH_readBE32(const void* ptr)
1886 {
1887     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
1888 }
1889 #endif
1890 
1891 XXH_FORCE_INLINE xxh_u32
1892 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1893 {
1894     if (align==XXH_unaligned) {
1895         return XXH_readLE32(ptr);
1896     } else {
1897         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
1898     }
1899 }
1900 
1901 
1902 /* *************************************
1903 *  Misc
1904 ***************************************/
1905 /*! @ingroup public */
1906 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
1907 
1908 
1909 /* *******************************************************************
1910 *  32-bit hash functions
1911 *********************************************************************/
1912 /*!
1913  * @}
1914  * @defgroup xxh32_impl XXH32 implementation
1915  * @ingroup impl
1916  * @{
1917  */
1918  /* #define instead of static const, to be used as initializers */
1919 #define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
1920 #define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
1921 #define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
1922 #define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
1923 #define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */
1924 
1925 #ifdef XXH_OLD_NAMES
1926 #  define PRIME32_1 XXH_PRIME32_1
1927 #  define PRIME32_2 XXH_PRIME32_2
1928 #  define PRIME32_3 XXH_PRIME32_3
1929 #  define PRIME32_4 XXH_PRIME32_4
1930 #  define PRIME32_5 XXH_PRIME32_5
1931 #endif
1932 
1933 /*!
1934  * @internal
1935  * @brief Normal stripe processing routine.
1936  *
1937  * This shuffles the bits so that any bit from @p input impacts several bits in
1938  * @p acc.
1939  *
1940  * @param acc The accumulator lane.
1941  * @param input The stripe of input to mix.
1942  * @return The mixed accumulator lane.
1943  */
1944 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1945 {
1946     acc += input * XXH_PRIME32_2;
1947     acc  = XXH_rotl32(acc, 13);
1948     acc *= XXH_PRIME32_1;
1949 #if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1950     /*
1951      * UGLY HACK:
1952      * A compiler fence is the only thing that prevents GCC and Clang from
1953      * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
1954      * reason) without globally disabling SSE4.1.
1955      *
1956      * The reason we want to avoid vectorization is because despite working on
1957      * 4 integers at a time, there are multiple factors slowing XXH32 down on
1958      * SSE4:
1959      * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
1960      *   newer chips!) making it slightly slower to multiply four integers at
1961      *   once compared to four integers independently. Even when pmulld was
1962      *   fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
1963      *   just to multiply unless doing a long operation.
1964      *
1965      * - Four instructions are required to rotate,
1966      *      movqda tmp,  v // not required with VEX encoding
1967      *      pslld  tmp, 13 // tmp <<= 13
1968      *      psrld  v,   19 // x >>= 19
1969      *      por    v,  tmp // x |= tmp
1970      *   compared to one for scalar:
1971      *      roll   v, 13    // reliably fast across the board
1972      *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
1973      *
1974      * - Instruction level parallelism is actually more beneficial here because
1975      *   the SIMD actually serializes this operation: While v1 is rotating, v2
1976      *   can load data, while v3 can multiply. SSE forces them to operate
1977      *   together.
1978      *
1979      * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
1980      * and it is pointless writing a NEON implementation that is basically the
1981      * same speed as scalar for XXH32.
1982      */
1983     XXH_COMPILER_GUARD(acc);
1984 #endif
1985     return acc;
1986 }
1987 
1988 /*!
1989  * @internal
1990  * @brief Mixes all bits to finalize the hash.
1991  *
1992  * The final mix ensures that all input bits have a chance to impact any bit in
1993  * the output digest, resulting in an unbiased distribution.
1994  *
1995  * @param h32 The hash to avalanche.
1996  * @return The avalanched hash.
1997  */
1998 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
1999 {
2000     h32 ^= h32 >> 15;
2001     h32 *= XXH_PRIME32_2;
2002     h32 ^= h32 >> 13;
2003     h32 *= XXH_PRIME32_3;
2004     h32 ^= h32 >> 16;
2005     return(h32);
2006 }
2007 
2008 #define XXH_get32bits(p) XXH_readLE32_align(p, align)
2009 
2010 /*!
2011  * @internal
2012  * @brief Processes the last 0-15 bytes of @p ptr.
2013  *
2014  * There may be up to 15 bytes remaining to consume from the input.
2015  * This final stage will digest them to ensure that all input bytes are present
2016  * in the final mix.
2017  *
2018  * @param h32 The hash to finalize.
2019  * @param ptr The pointer to the remaining input.
2020  * @param len The remaining length, modulo 16.
2021  * @param align Whether @p ptr is aligned.
2022  * @return The finalized hash.
2023  */
2024 static xxh_u32
2025 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
2026 {
2027 #define XXH_PROCESS1 do {                           \
2028     h32 += (*ptr++) * XXH_PRIME32_5;                \
2029     h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;      \
2030 } while (0)
2031 
2032 #define XXH_PROCESS4 do {                           \
2033     h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;      \
2034     ptr += 4;                                   \
2035     h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;     \
2036 } while (0)
2037 
2038     if (ptr==NULL) XXH_ASSERT(len == 0);
2039 
2040     /* Compact rerolled version; generally faster */
2041     if (!XXH32_ENDJMP) {
2042         len &= 15;
2043         while (len >= 4) {
2044             XXH_PROCESS4;
2045             len -= 4;
2046         }
2047         while (len > 0) {
2048             XXH_PROCESS1;
2049             --len;
2050         }
2051         return XXH32_avalanche(h32);
2052     } else {
2053          switch(len&15) /* or switch(bEnd - p) */ {
2054            case 12:      XXH_PROCESS4;
2055                          XXH_FALLTHROUGH;
2056            case 8:       XXH_PROCESS4;
2057                          XXH_FALLTHROUGH;
2058            case 4:       XXH_PROCESS4;
2059                          return XXH32_avalanche(h32);
2060 
2061            case 13:      XXH_PROCESS4;
2062                          XXH_FALLTHROUGH;
2063            case 9:       XXH_PROCESS4;
2064                          XXH_FALLTHROUGH;
2065            case 5:       XXH_PROCESS4;
2066                          XXH_PROCESS1;
2067                          return XXH32_avalanche(h32);
2068 
2069            case 14:      XXH_PROCESS4;
2070                          XXH_FALLTHROUGH;
2071            case 10:      XXH_PROCESS4;
2072                          XXH_FALLTHROUGH;
2073            case 6:       XXH_PROCESS4;
2074                          XXH_PROCESS1;
2075                          XXH_PROCESS1;
2076                          return XXH32_avalanche(h32);
2077 
2078            case 15:      XXH_PROCESS4;
2079                          XXH_FALLTHROUGH;
2080            case 11:      XXH_PROCESS4;
2081                          XXH_FALLTHROUGH;
2082            case 7:       XXH_PROCESS4;
2083                          XXH_FALLTHROUGH;
2084            case 3:       XXH_PROCESS1;
2085                          XXH_FALLTHROUGH;
2086            case 2:       XXH_PROCESS1;
2087                          XXH_FALLTHROUGH;
2088            case 1:       XXH_PROCESS1;
2089                          XXH_FALLTHROUGH;
2090            case 0:       return XXH32_avalanche(h32);
2091         }
2092         XXH_ASSERT(0);
2093         return h32;   /* reaching this point is deemed impossible */
2094     }
2095 }
2096 
2097 #ifdef XXH_OLD_NAMES
2098 #  define PROCESS1 XXH_PROCESS1
2099 #  define PROCESS4 XXH_PROCESS4
2100 #else
2101 #  undef XXH_PROCESS1
2102 #  undef XXH_PROCESS4
2103 #endif
2104 
2105 /*!
2106  * @internal
2107  * @brief The implementation for @ref XXH32().
2108  *
2109  * @param input , len , seed Directly passed from @ref XXH32().
2110  * @param align Whether @p input is aligned.
2111  * @return The calculated hash.
2112  */
2113 XXH_FORCE_INLINE xxh_u32
2114 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2115 {
2116     xxh_u32 h32;
2117 
2118     if (input==NULL) XXH_ASSERT(len == 0);
2119 
2120     if (len>=16) {
2121         const xxh_u8* const bEnd = input + len;
2122         const xxh_u8* const limit = bEnd - 15;
2123         xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2124         xxh_u32 v2 = seed + XXH_PRIME32_2;
2125         xxh_u32 v3 = seed + 0;
2126         xxh_u32 v4 = seed - XXH_PRIME32_1;
2127 
2128         do {
2129             v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
2130             v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
2131             v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
2132             v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
2133         } while (input < limit);
2134 
2135         h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
2136             + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
2137     } else {
2138         h32  = seed + XXH_PRIME32_5;
2139     }
2140 
2141     h32 += (xxh_u32)len;
2142 
2143     return XXH32_finalize(h32, input, len&15, align);
2144 }
2145 
2146 /*! @ingroup xxh32_family */
2147 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
2148 {
2149 #if 0
2150     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2151     XXH32_state_t state;
2152     XXH32_reset(&state, seed);
2153     XXH32_update(&state, (const xxh_u8*)input, len);
2154     return XXH32_digest(&state);
2155 #else
2156     if (XXH_FORCE_ALIGN_CHECK) {
2157         if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
2158             return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2159     }   }
2160 
2161     return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2162 #endif
2163 }
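/*
 * One-shot usage sketch (illustrative only; assumes the header is consumed
 * with XXH_INLINE_ALL, one of the modes described at the top of this
 * implementation section):
 *
 *   #define XXH_INLINE_ALL
 *   #include "xxhash.h"
 *
 *   const char msg[] = "Hello, xxHash!";
 *   XXH32_hash_t const h = XXH32(msg, sizeof(msg) - 1, 0);   // seed = 0
 */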
2164 
2165 
2166 
2167 /*******   Hash streaming   *******/
2168 /*!
2169  * @ingroup xxh32_family
2170  */
2171 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
2172 {
2173     return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
2174 }
2175 /*! @ingroup xxh32_family */
2176 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
2177 {
2178     XXH_free(statePtr);
2179     return XXH_OK;
2180 }
2181 
2182 /*! @ingroup xxh32_family */
2183 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
2184 {
2185     XXH_memcpy(dstState, srcState, sizeof(*dstState));
2186 }
2187 
2188 /*! @ingroup xxh32_family */
2189 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
2190 {
2191     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
2192     memset(&state, 0, sizeof(state));
2193     state.v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2194     state.v[1] = seed + XXH_PRIME32_2;
2195     state.v[2] = seed + 0;
2196     state.v[3] = seed - XXH_PRIME32_1;
2197     /* do not write into reserved, planned to be removed in a future version */
2198     XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
2199     return XXH_OK;
2200 }
2201 
2202 
2203 /*! @ingroup xxh32_family */
2204 XXH_PUBLIC_API XXH_errorcode
2205 XXH32_update(XXH32_state_t* state, const void* input, size_t len)
2206 {
2207     if (input==NULL) {
2208         XXH_ASSERT(len == 0);
2209         return XXH_OK;
2210     }
2211 
2212     {   const xxh_u8* p = (const xxh_u8*)input;
2213         const xxh_u8* const bEnd = p + len;
2214 
2215         state->total_len_32 += (XXH32_hash_t)len;
2216         state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
2217 
2218         if (state->memsize + len < 16)  {   /* fill in tmp buffer */
2219             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
2220             state->memsize += (XXH32_hash_t)len;
2221             return XXH_OK;
2222         }
2223 
2224         if (state->memsize) {   /* some data left from previous update */
2225             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
2226             {   const xxh_u32* p32 = state->mem32;
2227                 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
2228                 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
2229                 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
2230                 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
2231             }
2232             p += 16-state->memsize;
2233             state->memsize = 0;
2234         }
2235 
2236         if (p <= bEnd-16) {
2237             const xxh_u8* const limit = bEnd - 16;
2238 
2239             do {
2240                 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
2241                 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
2242                 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
2243                 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
2244             } while (p<=limit);
2245 
2246         }
2247 
2248         if (p < bEnd) {
2249             XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
2250             state->memsize = (unsigned)(bEnd-p);
2251         }
2252     }
2253 
2254     return XXH_OK;
2255 }
2256 
2257 
2258 /*! @ingroup xxh32_family */
2259 XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
2260 {
2261     xxh_u32 h32;
2262 
2263     if (state->large_len) {
2264         h32 = XXH_rotl32(state->v[0], 1)
2265             + XXH_rotl32(state->v[1], 7)
2266             + XXH_rotl32(state->v[2], 12)
2267             + XXH_rotl32(state->v[3], 18);
2268     } else {
2269         h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
2270     }
2271 
2272     h32 += state->total_len_32;
2273 
2274     return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
2275 }
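/*
 * Streaming usage sketch (illustrative): feeding the input in arbitrary
 * chunks produces the same hash as a single XXH32() call over the whole
 * buffer:
 *
 *   XXH32_state_t* const st = XXH32_createState();
 *   if (st != NULL) {
 *       XXH32_reset(st, 0);                    // seed = 0
 *       XXH32_update(st, "Hello, ", 7);
 *       XXH32_update(st, "xxHash!", 7);
 *       {   XXH32_hash_t const h = XXH32_digest(st);
 *           (void)h;   // equals XXH32("Hello, xxHash!", 14, 0)
 *       }
 *       XXH32_freeState(st);
 *   }
 */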
2276 
2277 
2278 /*******   Canonical representation   *******/
2279 
2280 /*!
2281  * @ingroup xxh32_family
2282  * The default return values from XXH functions are unsigned 32 and 64 bit
2283  * integers.
2284  *
2285  * The canonical representation uses big endian convention, the same convention
2286  * as human-readable numbers (large digits first).
2287  *
2288  * This way, hash values can be written into a file or buffer, remaining
2289  * comparable across different systems.
2290  *
2291  * The following functions allow transformation of hash values to and from their
2292  * canonical format.
2293  */
2294 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
2295 {
2296     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
2297     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
2298     XXH_memcpy(dst, &hash, sizeof(*dst));
2299 }
2300 /*! @ingroup xxh32_family */
2301 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
2302 {
2303     return XXH_readBE32(src);
2304 }
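/*
 * Canonical representation sketch (illustrative): store a hash in a
 * portable big-endian form, then recover it on any platform:
 *
 *   XXH32_hash_t const h = XXH32("abc", 3, 0);
 *   XXH32_canonical_t canon;
 *   XXH32_canonicalFromHash(&canon, h);   // canon holds big-endian bytes
 *   // ... canon can be written to a file or sent over the network ...
 *   XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canon);
 *   // h2 == h, regardless of the endianness of either machine
 */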
2305 
2306 
2307 #ifndef XXH_NO_LONG_LONG
2308 
2309 /* *******************************************************************
2310 *  64-bit hash functions
2311 *********************************************************************/
2312 /*!
2313  * @}
2314  * @ingroup impl
2315  * @{
2316  */
2317 /*******   Memory access   *******/
2318 
2319 typedef XXH64_hash_t xxh_u64;
2320 
2321 #ifdef XXH_OLD_NAMES
2322 #  define U64 xxh_u64
2323 #endif
2324 
2325 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2326 /*
2327  * Manual byteshift. Best for old compilers which don't inline memcpy.
2328  * We actually directly use XXH_readLE64 and XXH_readBE64.
2329  */
2330 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2331 
2332 /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
2333 static xxh_u64 XXH_read64(const void* memPtr)
2334 {
2335     return *(const xxh_u64*) memPtr;
2336 }
2337 
2338 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2339 
2340 /*
2341  * __pack instructions are safer, but compiler specific, hence potentially
2342  * problematic for some compilers.
2343  *
2344  * Currently only defined for GCC and ICC.
2345  */
2346 #ifdef XXH_OLD_NAMES
2347 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
2348 #endif
2349 static xxh_u64 XXH_read64(const void* ptr)
2350 {
2351     typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
2352     return ((const xxh_unalign64*)ptr)->u64;
2353 }
2354 
2355 #else
2356 
2357 /*
2358  * Portable and safe solution. Generally efficient.
2359  * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
2360  */
2361 static xxh_u64 XXH_read64(const void* memPtr)
2362 {
2363     xxh_u64 val;
2364     XXH_memcpy(&val, memPtr, sizeof(val));
2365     return val;
2366 }
2367 
2368 #endif   /* XXH_FORCE_MEMORY_ACCESS */
2369 
2370 #if defined(_MSC_VER)     /* Visual Studio */
2371 #  define XXH_swap64 _byteswap_uint64
2372 #elif XXH_GCC_VERSION >= 403
2373 #  define XXH_swap64 __builtin_bswap64
2374 #else
2375 static xxh_u64 XXH_swap64(xxh_u64 x)
2376 {
2377     return  ((x << 56) & 0xff00000000000000ULL) |
2378             ((x << 40) & 0x00ff000000000000ULL) |
2379             ((x << 24) & 0x0000ff0000000000ULL) |
2380             ((x << 8)  & 0x000000ff00000000ULL) |
2381             ((x >> 8)  & 0x00000000ff000000ULL) |
2382             ((x >> 24) & 0x0000000000ff0000ULL) |
2383             ((x >> 40) & 0x000000000000ff00ULL) |
2384             ((x >> 56) & 0x00000000000000ffULL);
2385 }
2386 #endif
2387 
2388 
2389 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2390 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2391 
2392 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2393 {
2394     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2395     return bytePtr[0]
2396          | ((xxh_u64)bytePtr[1] << 8)
2397          | ((xxh_u64)bytePtr[2] << 16)
2398          | ((xxh_u64)bytePtr[3] << 24)
2399          | ((xxh_u64)bytePtr[4] << 32)
2400          | ((xxh_u64)bytePtr[5] << 40)
2401          | ((xxh_u64)bytePtr[6] << 48)
2402          | ((xxh_u64)bytePtr[7] << 56);
2403 }
2404 
2405 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2406 {
2407     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2408     return bytePtr[7]
2409          | ((xxh_u64)bytePtr[6] << 8)
2410          | ((xxh_u64)bytePtr[5] << 16)
2411          | ((xxh_u64)bytePtr[4] << 24)
2412          | ((xxh_u64)bytePtr[3] << 32)
2413          | ((xxh_u64)bytePtr[2] << 40)
2414          | ((xxh_u64)bytePtr[1] << 48)
2415          | ((xxh_u64)bytePtr[0] << 56);
2416 }
2417 
2418 #else
2419 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2420 {
2421     return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2422 }
2423 
2424 static xxh_u64 XXH_readBE64(const void* ptr)
2425 {
2426     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2427 }
2428 #endif
2429 
2430 XXH_FORCE_INLINE xxh_u64
2431 XXH_readLE64_align(const void* ptr, XXH_alignment align)
2432 {
2433     if (align==XXH_unaligned)
2434         return XXH_readLE64(ptr);
2435     else
2436         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2437 }
2438 
2439 
2440 /*******   xxh64   *******/
2441 /*!
2442  * @}
2443  * @defgroup xxh64_impl XXH64 implementation
2444  * @ingroup impl
2445  * @{
2446  */
2447 /* #define rather than static const, to be used as initializers */
2448 #define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2449 #define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2450 #define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2451 #define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2452 #define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2453 
2454 #ifdef XXH_OLD_NAMES
2455 #  define PRIME64_1 XXH_PRIME64_1
2456 #  define PRIME64_2 XXH_PRIME64_2
2457 #  define PRIME64_3 XXH_PRIME64_3
2458 #  define PRIME64_4 XXH_PRIME64_4
2459 #  define PRIME64_5 XXH_PRIME64_5
2460 #endif
2461 
2462 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2463 {
2464     acc += input * XXH_PRIME64_2;
2465     acc  = XXH_rotl64(acc, 31);
2466     acc *= XXH_PRIME64_1;
2467     return acc;
2468 }
2469 
2470 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2471 {
2472     val  = XXH64_round(0, val);
2473     acc ^= val;
2474     acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2475     return acc;
2476 }
2477 
2478 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2479 {
2480     h64 ^= h64 >> 33;
2481     h64 *= XXH_PRIME64_2;
2482     h64 ^= h64 >> 29;
2483     h64 *= XXH_PRIME64_3;
2484     h64 ^= h64 >> 32;
2485     return h64;
2486 }
2487 
2488 
2489 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
2490 
2491 static xxh_u64
2492 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2493 {
2494     if (ptr==NULL) XXH_ASSERT(len == 0);
2495     len &= 31;
2496     while (len >= 8) {
2497         xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2498         ptr += 8;
2499         h64 ^= k1;
2500         h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2501         len -= 8;
2502     }
2503     if (len >= 4) {
2504         h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2505         ptr += 4;
2506         h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2507         len -= 4;
2508     }
2509     while (len > 0) {
2510         h64 ^= (*ptr++) * XXH_PRIME64_5;
2511         h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
2512         --len;
2513     }
2514     return  XXH64_avalanche(h64);
2515 }
2516 
2517 #ifdef XXH_OLD_NAMES
2518 #  define PROCESS1_64 XXH_PROCESS1_64
2519 #  define PROCESS4_64 XXH_PROCESS4_64
2520 #  define PROCESS8_64 XXH_PROCESS8_64
2521 #else
2522 #  undef XXH_PROCESS1_64
2523 #  undef XXH_PROCESS4_64
2524 #  undef XXH_PROCESS8_64
2525 #endif
2526 
2527 XXH_FORCE_INLINE xxh_u64
2528 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2529 {
2530     xxh_u64 h64;
2531     if (input==NULL) XXH_ASSERT(len == 0);
2532 
2533     if (len>=32) {
2534         const xxh_u8* const bEnd = input + len;
2535         const xxh_u8* const limit = bEnd - 31;
2536         xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2537         xxh_u64 v2 = seed + XXH_PRIME64_2;
2538         xxh_u64 v3 = seed + 0;
2539         xxh_u64 v4 = seed - XXH_PRIME64_1;
2540 
2541         do {
2542             v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2543             v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2544             v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2545             v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2546         } while (input<limit);
2547 
2548         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2549         h64 = XXH64_mergeRound(h64, v1);
2550         h64 = XXH64_mergeRound(h64, v2);
2551         h64 = XXH64_mergeRound(h64, v3);
2552         h64 = XXH64_mergeRound(h64, v4);
2553 
2554     } else {
2555         h64  = seed + XXH_PRIME64_5;
2556     }
2557 
2558     h64 += (xxh_u64) len;
2559 
2560     return XXH64_finalize(h64, input, len, align);
2561 }
2562 
2563 
2564 /*! @ingroup xxh64_family */
2565 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
2566 {
2567 #if 0
2568     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2569     XXH64_state_t state;
2570     XXH64_reset(&state, seed);
2571     XXH64_update(&state, (const xxh_u8*)input, len);
2572     return XXH64_digest(&state);
2573 #else
2574     if (XXH_FORCE_ALIGN_CHECK) {
2575         if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
2576             return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2577     }   }
2578 
2579     return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2580 
2581 #endif
2582 }
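/*
 * One-shot usage sketch (illustrative), mirroring the XXH32 example above;
 * `buffer` and `bufferSize` stand in for the caller's data:
 *
 *   XXH64_hash_t const h = XXH64(buffer, bufferSize, 0);   // 64-bit seed
 */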
2583 
2584 /*******   Hash Streaming   *******/
2585 
2586 /*! @ingroup xxh64_family*/
2587 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2588 {
2589     return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2590 }
2591 /*! @ingroup xxh64_family */
2592 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2593 {
2594     XXH_free(statePtr);
2595     return XXH_OK;
2596 }
2597 
2598 /*! @ingroup xxh64_family */
2599 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
2600 {
2601     XXH_memcpy(dstState, srcState, sizeof(*dstState));
2602 }
2603 
2604 /*! @ingroup xxh64_family */
2605 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
2606 {
2607     XXH64_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
2608     memset(&state, 0, sizeof(state));
2609     state.v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2610     state.v[1] = seed + XXH_PRIME64_2;
2611     state.v[2] = seed + 0;
2612     state.v[3] = seed - XXH_PRIME64_1;
2613      /* do not write into reserved64, might be removed in a future version */
2614     XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
2615     return XXH_OK;
2616 }
2617 
2618 /*! @ingroup xxh64_family */
2619 XXH_PUBLIC_API XXH_errorcode
2620 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
2621 {
2622     if (input==NULL) {
2623         XXH_ASSERT(len == 0);
2624         return XXH_OK;
2625     }
2626 
2627     {   const xxh_u8* p = (const xxh_u8*)input;
2628         const xxh_u8* const bEnd = p + len;
2629 
2630         state->total_len += len;
2631 
2632         if (state->memsize + len < 32) {  /* fill in tmp buffer */
2633             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2634             state->memsize += (xxh_u32)len;
2635             return XXH_OK;
2636         }
2637 
2638         if (state->memsize) {   /* tmp buffer is full */
2639             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2640             state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
2641             state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
2642             state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
2643             state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
2644             p += 32 - state->memsize;
2645             state->memsize = 0;
2646         }
2647 
2648         if (p+32 <= bEnd) {
2649             const xxh_u8* const limit = bEnd - 32;
2650 
2651             do {
2652                 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
2653                 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
2654                 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
2655                 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
2656             } while (p<=limit);
2657 
2658         }
2659 
2660         if (p < bEnd) {
2661             XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2662             state->memsize = (unsigned)(bEnd-p);
2663         }
2664     }
2665 
2666     return XXH_OK;
2667 }
2668 
2669 
2670 /*! @ingroup xxh64_family */
2671 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
2672 {
2673     xxh_u64 h64;
2674 
2675     if (state->total_len >= 32) {
2676         h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
2677         h64 = XXH64_mergeRound(h64, state->v[0]);
2678         h64 = XXH64_mergeRound(h64, state->v[1]);
2679         h64 = XXH64_mergeRound(h64, state->v[2]);
2680         h64 = XXH64_mergeRound(h64, state->v[3]);
2681     } else {
2682         h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
2683     }
2684 
2685     h64 += (xxh_u64) state->total_len;
2686 
2687     return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
2688 }
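/*
 * Streaming sketch for large inputs (illustrative; assumes <stdio.h> and a
 * FILE* `fp` opened by the caller): hash a file without loading it whole:
 *
 *   char buf[4096];
 *   size_t n;
 *   XXH64_state_t* const st = XXH64_createState();
 *   if (st != NULL) {
 *       XXH64_reset(st, 0);
 *       while ((n = fread(buf, 1, sizeof(buf), fp)) > 0)
 *           XXH64_update(st, buf, n);
 *       {   XXH64_hash_t const h = XXH64_digest(st);
 *           (void)h;
 *       }
 *       XXH64_freeState(st);
 *   }
 */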
2689 
2690 
2691 /******* Canonical representation   *******/
2692 
2693 /*! @ingroup xxh64_family */
2694 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
2695 {
2696     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
2697     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
2698     XXH_memcpy(dst, &hash, sizeof(*dst));
2699 }
2700 
2701 /*! @ingroup xxh64_family */
2702 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
2703 {
2704     return XXH_readBE64(src);
2705 }
2706 
2707 #ifndef XXH_NO_XXH3
2708 
2709 /* *********************************************************************
2710 *  XXH3
2711 *  New generation hash designed for speed on small keys and vectorization
2712 ************************************************************************ */
2713 /*!
2714  * @}
2715  * @defgroup xxh3_impl XXH3 implementation
2716  * @ingroup impl
2717  * @{
2718  */
2719 
2720 /* ===   Compiler specifics   === */
2721 
2722 #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
2723 #  define XXH_RESTRICT /* disable */
2724 #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
2725 #  define XXH_RESTRICT   restrict
2726 #else
2727 /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
2728 #  define XXH_RESTRICT   /* disable */
2729 #endif
2730 
2731 #if (defined(__GNUC__) && (__GNUC__ >= 3))  \
2732   || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2733   || defined(__clang__)
2734 #    define XXH_likely(x) __builtin_expect(x, 1)
2735 #    define XXH_unlikely(x) __builtin_expect(x, 0)
2736 #else
2737 #    define XXH_likely(x) (x)
2738 #    define XXH_unlikely(x) (x)
2739 #endif
2740 
2741 #if defined(__GNUC__)
2742 #  if defined(__AVX2__)
2743 #    include <immintrin.h>
2744 #  elif defined(__SSE2__)
2745 #    include <emmintrin.h>
2746 #  elif defined(__ARM_NEON__) || defined(__ARM_NEON)
2747 #    define inline __inline__  /* circumvent a clang bug */
2748 #    include <arm_neon.h>
2749 #    undef inline
2750 #  endif
2751 #elif defined(_MSC_VER)
2752 #  include <intrin.h>
2753 #endif
2754 
2755 /*
2756  * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
2757  * remaining a true 64-bit/128-bit hash function.
2758  *
2759  * This is done by prioritizing a subset of 64-bit operations that can be
2760  * emulated without too many steps on the average 32-bit machine.
2761  *
2762  * For example, these two lines seem similar, and run equally fast on 64-bit:
2763  *
2764  *   xxh_u64 x;
2765  *   x ^= (x >> 47); // good
2766  *   x ^= (x >> 13); // bad
2767  *
2768  * However, to a 32-bit machine, there is a major difference.
2769  *
2770  * x ^= (x >> 47) looks like this:
2771  *
2772  *   x.lo ^= (x.hi >> (47 - 32));
2773  *
2774  * while x ^= (x >> 13) looks like this:
2775  *
2776  *   // note: funnel shifts are not usually cheap.
2777  *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
2778  *   x.hi ^= (x.hi >> 13);
2779  *
2780  * The first one is significantly faster than the second, simply because the
2781  * shift is larger than 32. This means:
2782  *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
2783  *    32 bits in the shift.
2784  *  - The shift result will always fit in the lower 32 bits, and therefore,
2785  *    we can ignore the upper 32 bits in the xor.
2786  *
2787  * Thanks to this optimization, XXH3 only requires these features to be efficient:
2788  *
2789  *  - Usable unaligned access
2790  *  - A 32-bit or 64-bit ALU
2791  *      - If 32-bit, a decent ADC instruction
2792  *  - A 32 or 64-bit multiply with a 64-bit result
2793  *  - For the 128-bit variant, a decent byteswap helps short inputs.
2794  *
2795  * The first two are already required by XXH32, and almost all 32-bit and 64-bit
2796  * platforms which can run XXH32 can run XXH3 efficiently.
2797  *
2798  * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
2799  * notable exception.
2800  *
2801  * First of all, Thumb-1 lacks support for the UMULL instruction which
2802  * performs the important long multiply. This means numerous __aeabi_lmul
2803  * calls.
2804  *
2805  * Second of all, the 8 functional registers are just not enough.
2806  * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
2807  * Lo registers, and this shuffling results in thousands more MOVs than A32.
2808  *
2809  * A32 and T32 don't have this limitation. They can access all 14 registers,
2810  * do a 32->64 multiply with UMULL, and the flexible operand allowing free
2811  * shifts is helpful, too.
2812  *
2813  * Therefore, we do a quick sanity check.
2814  *
2815  * If compiling Thumb-1 for a target which supports ARM instructions, we will
2816  * emit a warning, as it is not a "sane" platform to compile for.
2817  *
2818  * Usually, if this happens, it is because of an accident and you probably need
2819  * to specify -march, as you likely meant to compile for a newer architecture.
2820  *
2821  * Credit: large sections of the vectorial and asm source code paths
2822  *         have been contributed by @easyaspi314
2823  */
2824 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2825 #   warning "XXH3 is highly inefficient without ARM or Thumb-2."
2826 #endif
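/*
 * Illustration only (hedged sketch, not used by the library): on a 32-bit
 * target, the "good" xorshift above reduces to a single 32-bit operation,
 * because the shift amount is >= 32. The struct and names are hypothetical.
 *
 *   typedef struct { xxh_u32 lo, hi; } xxh_u64_emulated;
 *
 *   static xxh_u64_emulated xorshift47_sketch(xxh_u64_emulated x)
 *   {
 *       x.lo ^= x.hi >> (47 - 32);  // only the high word contributes
 *       return x;                   // x.hi is untouched: (x >> 47) < 2^17
 *   }
 */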
2827 
2828 /* ==========================================
2829  * Vectorization detection
2830  * ========================================== */
2831 
2832 #ifdef XXH_DOXYGEN
2833 /*!
2834  * @ingroup tuning
2835  * @brief Overrides the vectorization implementation chosen for XXH3.
2836  *
2837  * Can be defined to 0 to disable SIMD or any of the values mentioned in
2838  * @ref XXH_VECTOR_TYPE.
2839  *
2840  * If this is not defined, it uses predefined macros to determine the best
2841  * implementation.
2842  */
2843 #  define XXH_VECTOR XXH_SCALAR
2844 /*!
2845  * @ingroup tuning
2846  * @brief Possible values for @ref XXH_VECTOR.
2847  *
2848  * Note that these are actually implemented as macros.
2849  *
2850  * If this is not defined, it is detected automatically.
2851  * @ref XXH_X86DISPATCH overrides this.
2852  */
2853 enum XXH_VECTOR_TYPE /* fake enum */ {
2854     XXH_SCALAR = 0,  /*!< Portable scalar version */
2855     XXH_SSE2   = 1,  /*!<
2856                       * SSE2 for Pentium 4, Opteron, all x86_64.
2857                       *
2858                       * @note SSE2 is also guaranteed on Windows 10, macOS, and
2859                       * Android x86.
2860                       */
2861     XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
2862     XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
2863     XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
2864     XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
2865 };
2866 /*!
2867  * @ingroup tuning
2868  * @brief Selects the minimum alignment for XXH3's accumulators.
2869  *
2870  * When using SIMD, this should match the alignment required for said vector
2871  * type, so, for example, 32 for AVX2.
2872  *
2873  * Default: Auto detected.
2874  */
2875 #  define XXH_ACC_ALIGN 8
2876 #endif
2877 
2878 /* Actual definition */
2879 #ifndef XXH_DOXYGEN
2880 #  define XXH_SCALAR 0
2881 #  define XXH_SSE2   1
2882 #  define XXH_AVX2   2
2883 #  define XXH_AVX512 3
2884 #  define XXH_NEON   4
2885 #  define XXH_VSX    5
2886 #endif
2887 
2888 #ifndef XXH_VECTOR    /* can be defined on command line */
2889 #  if defined(__AVX512F__)
2890 #    define XXH_VECTOR XXH_AVX512
2891 #  elif defined(__AVX2__)
2892 #    define XXH_VECTOR XXH_AVX2
2893 #  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
2894 #    define XXH_VECTOR XXH_SSE2
2895 #  elif ( \
2896         defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
2897      || defined(_M_ARM64) || defined(_M_ARM_ARMV7VE) /* msvc */ \
2898    ) && ( \
2899         defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
2900     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
2901    )
2902 #    define XXH_VECTOR XXH_NEON
2903 #  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
2904      || (defined(__s390x__) && defined(__VEC__)) \
2905      && defined(__GNUC__) /* TODO: IBM XL */
2906 #    define XXH_VECTOR XXH_VSX
2907 #  else
2908 #    define XXH_VECTOR XXH_SCALAR
2909 #  endif
2910 #endif
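/*
 * Example (sketch): since XXH_VECTOR can be set on the command line, the
 * detection above can be overridden at build time, e.g. to force the
 * portable scalar code path while debugging:
 *
 *   cc -O3 -DXXH_VECTOR=0 -c xxhash.c    (0 == XXH_SCALAR)
 */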
2911 
2912 /*
2913  * Controls the alignment of the accumulator,
2914  * for compatibility with aligned vector loads, which are usually faster.
2915  */
2916 #ifndef XXH_ACC_ALIGN
2917 #  if defined(XXH_X86DISPATCH)
2918 #     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
2919 #  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
2920 #     define XXH_ACC_ALIGN 8
2921 #  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
2922 #     define XXH_ACC_ALIGN 16
2923 #  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
2924 #     define XXH_ACC_ALIGN 32
2925 #  elif XXH_VECTOR == XXH_NEON  /* neon */
2926 #     define XXH_ACC_ALIGN 16
2927 #  elif XXH_VECTOR == XXH_VSX   /* vsx */
2928 #     define XXH_ACC_ALIGN 16
2929 #  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
2930 #     define XXH_ACC_ALIGN 64
2931 #  endif
2932 #endif
2933 
2934 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2935     || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2936 #  define XXH_SEC_ALIGN XXH_ACC_ALIGN
2937 #else
2938 #  define XXH_SEC_ALIGN 8
2939 #endif
2940 
2941 /*
2942  * UGLY HACK:
2943  * GCC usually generates the best code with -O3 for xxHash.
2944  *
2945  * However, when targeting AVX2, it is overzealous in its unrolling, resulting
2946  * in code roughly 3/4 the speed of Clang.
2947  *
2948  * There are other issues, such as GCC splitting _mm256_loadu_si256 into
2949  * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
2950  * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
2951  *
2952  * That is why when compiling the AVX2 version, it is recommended to use either
2953  *   -O2 -mavx2 -march=haswell
2954  * or
2955  *   -O2 -mavx2 -mno-avx256-split-unaligned-load
2956  * for decent performance, or to use Clang instead.
2957  *
2958  * Fortunately, we can control the first one with a pragma that forces GCC into
2959  * -O2, but the other one we can't control without "failed to inline always
2960  * inline function due to target mismatch" warnings.
2961  */
2962 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
2963   && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
2964   && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
2965 #  pragma GCC push_options
2966 #  pragma GCC optimize("-O2")
2967 #endif
2968 
2969 
2970 #if XXH_VECTOR == XXH_NEON
2971 /*
2972  * NEON's setup for vmlal_u32 is a little more complicated than it is on
2973  * SSE2, AVX2, and VSX.
2974  *
2975  * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
2976  *
2977  * To do the same operation, the 128-bit 'Q' register needs to be split into
2978  * two 64-bit 'D' registers, performing this operation:
2979  *
2980  *   [                a                 |                 b                ]
2981  *            |              '---------. .--------'                |
2982  *            |                         x                          |
2983  *            |              .---------' '--------.                |
2984  *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
2985  *
2986  * Due to significant changes in aarch64, the fastest method for aarch64 is
2987  * completely different from the fastest method for ARMv7-A.
2988  *
2989  * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
2990  * D11 will modify the high half of Q5. This is similar to how modifying AH
2991  * will only affect bits 8-15 of AX on x86.
2992  *
2993  * VZIP takes two registers, and puts even lanes in one register and odd lanes
2994  * in the other.
2995  *
2996  * On ARMv7-A, this strangely modifies both parameters in place instead of
2997  * taking the usual 3-operand form.
2998  *
2999  * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
3000  * lower and upper halves of the Q register to end up with the high and low
3001  * halves where we want - all in one instruction.
3002  *
3003  *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
3004  *
3005  * Unfortunately, we need inline assembly for this: instructions modifying two
3006  * registers at once are not expressible in GCC or Clang's IR, so the compilers
3007  * have to create a copy.
3008  *
3009  * aarch64 requires a different approach.
3010  *
3011  * In order to make it easier to write a decent compiler for aarch64, many
3012  * quirks were removed, such as conditional execution.
3013  *
3014  * NEON was also affected by this.
3015  *
3016  * aarch64 cannot access the high bits of a Q-form register, and writes to a
3017  * D-form register zero the high bits, similar to how writes to W-form scalar
3018  * registers (or DWORD registers on x86_64) work.
3019  *
3020  * The formerly free vget_high intrinsics now require a vext (with a few
3021  * exceptions).
3022  *
3023  * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
3024  * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
3025  * operand.
3026  *
3027  * The equivalent of the VZIP.32 on the lower and upper halves would be this
3028  * mess:
3029  *
3030  *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
3031  *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
3032  *   zip2    v0.2s, v0.2s, v1.2s     // v0 = { v0[1], v2[1] }
3033  *
3034  * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
3035  *
3036  *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
3037  *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
3038  *
3039  * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
3040  */
3041 
3042 /*!
3043  * Function-like macro:
3044  * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
3045  * {
3046  *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
3047  *     outHi = (uint32x2_t)(in >> 32);
3048  *     in = UNDEFINED;
3049  * }
3050  */
3051 # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
3052    && defined(__GNUC__) \
3053    && !defined(__aarch64__) && !defined(__arm64__) && !defined(_M_ARM64)
3054 #  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                              \
3055     do {                                                                                    \
3056       /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
3057       /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */     \
3058       /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
3059       __asm__("vzip.32  %e0, %f0" : "+w" (in));                                             \
3060       (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                                   \
3061       (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                   \
3062    } while (0)
3063 # else
3064 #  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
3065     do {                                                                                  \
3066       (outLo) = vmovn_u64    (in);                                                        \
3067       (outHi) = vshrn_n_u64  ((in), 32);                                                  \
3068     } while (0)
3069 # endif
3070 #endif  /* XXH_VECTOR == XXH_NEON */
3071 
3072 /*
3073  * VSX and Z Vector helpers.
3074  *
3075  * This is very messy, and any pull requests to clean this up are welcome.
3076  *
3077  * There are a lot of problems with supporting VSX and s390x, due to
3078  * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
3079  */
3080 #if XXH_VECTOR == XXH_VSX
3081 #  if defined(__s390x__)
3082 #    include <s390intrin.h>
3083 #  else
3084 /* gcc's altivec.h can have the unwanted consequence of unconditionally
3085  * #defining the bool, vector, and pixel keywords,
3086  * with bad consequences for programs already using these identifiers for other purposes.
3087  * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
3088  * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
3089  * but in some cases it isn't.
3090  * Force the build macro to be defined, so that the keywords are not altered.
3091  */
3092 #    if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
3093 #      define __APPLE_ALTIVEC__
3094 #    endif
3095 #    include <altivec.h>
3096 #  endif
3097 
3098 typedef __vector unsigned long long xxh_u64x2;
3099 typedef __vector unsigned char xxh_u8x16;
3100 typedef __vector unsigned xxh_u32x4;
3101 
3102 # ifndef XXH_VSX_BE
3103 #  if defined(__BIG_ENDIAN__) \
3104   || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
3105 #    define XXH_VSX_BE 1
3106 #  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
3107 #    warning "-maltivec=be is not recommended. Please use native endianness."
3108 #    define XXH_VSX_BE 1
3109 #  else
3110 #    define XXH_VSX_BE 0
3111 #  endif
3112 # endif /* !defined(XXH_VSX_BE) */
3113 
3114 # if XXH_VSX_BE
3115 #  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
3116 #    define XXH_vec_revb vec_revb
3117 #  else
3118 /*!
3119  * A polyfill for POWER9's vec_revb().
3120  */
3121 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
3122 {
3123     xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
3124                                   0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
3125     return vec_perm(val, val, vByteSwap);
3126 }
3127 #  endif
3128 # endif /* XXH_VSX_BE */
3129 
3130 /*!
3131  * Performs an unaligned vector load and byte swaps it on big endian.
3132  */
3133 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
3134 {
3135     xxh_u64x2 ret;
3136     XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
3137 # if XXH_VSX_BE
3138     ret = XXH_vec_revb(ret);
3139 # endif
3140     return ret;
3141 }
3142 
3143 /*
3144  * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
3145  *
3146  * These intrinsics weren't added until GCC 8, despite existing for a while,
3147  * and they are endian dependent. Also, their meaning swaps depending on the version.
3148  */
3149 # if defined(__s390x__)
3150  /* s390x is always big endian, no issue on this platform */
3151 #  define XXH_vec_mulo vec_mulo
3152 #  define XXH_vec_mule vec_mule
3153 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
3154 /* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
3155 #  define XXH_vec_mulo __builtin_altivec_vmulouw
3156 #  define XXH_vec_mule __builtin_altivec_vmuleuw
3157 # else
3158 /* gcc needs inline assembly */
3159 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
3160 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3161 {
3162     xxh_u64x2 result;
3163     __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3164     return result;
3165 }
3166 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3167 {
3168     xxh_u64x2 result;
3169     __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3170     return result;
3171 }
3172 # endif /* XXH_vec_mulo, XXH_vec_mule */
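/*
 * Semantics sketch (illustration only, using the ISA's big-endian element
 * numbering): with 32-bit lanes a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3},
 * the even/odd multiplies each produce two 64-bit products:
 *
 *   XXH_vec_mule(a, b) ~= { (xxh_u64)a0*b0, (xxh_u64)a2*b2 }   // even lanes
 *   XXH_vec_mulo(a, b) ~= { (xxh_u64)a1*b1, (xxh_u64)a3*b3 }   // odd lanes
 */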
3173 #endif /* XXH_VECTOR == XXH_VSX */
3174 
3175 
3176 /* prefetch
3177  * can be disabled by defining the XXH_NO_PREFETCH build macro */
3178 #if defined(XXH_NO_PREFETCH)
3179 #  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
3180 #else
3181 #  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
3182 #    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3183 #    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3184 #  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3185 #    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3186 #  else
3187 #    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
3188 #  endif
3189 #endif  /* XXH_NO_PREFETCH */
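/*
 * Example (sketch): prefetching can be turned off from the build line:
 *
 *   cc -O3 -DXXH_NO_PREFETCH -c xxhash.c
 */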
3190 
3191 
3192 /* ==========================================
3193  * XXH3 default settings
3194  * ========================================== */
3195 
3196 #define XXH_SECRET_DEFAULT_SIZE 192   /* must be at least XXH3_SECRET_SIZE_MIN */
3197 
3198 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3199 #  error "default keyset is not large enough"
3200 #endif
3201 
3202 /*! Pseudorandom secret taken directly from FARSH. */
3203 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3204     0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3205     0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3206     0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3207     0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3208     0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3209     0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3210     0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3211     0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3212     0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3213     0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3214     0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3215     0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
3216 };
3217 
3218 
3219 #ifdef XXH_OLD_NAMES
3220 #  define kSecret XXH3_kSecret
3221 #endif
3222 
3223 #ifdef XXH_DOXYGEN
3224 /*!
3225  * @brief Calculates a 32-bit to 64-bit long multiply.
3226  *
3227  * Implemented as a macro.
3228  *
3229  * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
3230  * need to (but it shouldn't need to anyway, as it is about 7 instructions to do
3231  * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
3232  * use that instead of the normal method.
3233  *
3234  * If you are compiling for platforms like Thumb-1 and don't have a better option,
3235  * you may also want to write your own long multiply routine here.
3236  *
3237  * @param x, y Numbers to be multiplied
3238  * @return 64-bit product of the low 32 bits of @p x and @p y.
3239  */
3240 XXH_FORCE_INLINE xxh_u64
3241 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3242 {
3243    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3244 }
3245 #elif defined(_MSC_VER) && defined(_M_IX86)
3246 #    include <intrin.h>
3247 #    define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3248 #else
3249 /*
3250  * Downcast + upcast is usually better than masking on older compilers like
3251  * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
3252  *
3253  * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
3254  * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
3255  */
3256 #    define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
3257 #endif
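/*
 * Illustration only: both formulations compute the same 32x32->64 product;
 * the cast-based one merely makes the narrow multiply obvious to older
 * compilers.
 *
 *   xxh_u64 a = XXH_mult32to64(0xFFFFFFFF, 2);                      // 0x1FFFFFFFE
 *   xxh_u64 b = (0xFFFFFFFFULL & 0xFFFFFFFF) * (2ULL & 0xFFFFFFFF); // same value
 */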
3258 
3259 /*!
3260  * @brief Calculates a 64->128-bit long multiply.
3261  *
3262  * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
3263  * version.
3264  *
3265  * @param lhs , rhs The 64-bit integers to be multiplied
3266  * @return The 128-bit result represented in an @ref XXH128_hash_t.
3267  */
3268 static XXH128_hash_t
3269 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
3270 {
3271     /*
3272      * GCC/Clang __uint128_t method.
3273      *
3274      * On most 64-bit targets, GCC and Clang define a __uint128_t type.
3275      * This is usually the best way as it usually uses a native long 64-bit
3276      * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
3277      *
3278      * Usually.
3279      *
3280      * Even on some 32-bit platforms (e.g. wasm), Clang and emscripten define
3281      * this type despite not having the arithmetic for it. This results in a laggy
3282      * compiler builtin call which calculates a full 128-bit multiply.
3283      * In that case it is best to use the portable one.
3284      * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
3285      */
3286 #if defined(__GNUC__) && !defined(__wasm__) \
3287     && defined(__SIZEOF_INT128__) \
3288     || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3289 
3290     __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
3291     XXH128_hash_t r128;
3292     r128.low64  = (xxh_u64)(product);
3293     r128.high64 = (xxh_u64)(product >> 64);
3294     return r128;
3295 
3296     /*
3297      * MSVC for x64's _umul128 method.
3298      *
3299      * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
3300      *
3301      * This compiles to a single-operand MUL on x64.
3302      */
3303 #elif defined(_M_X64) || defined(_M_IA64)
3304 
3305 #ifndef _MSC_VER
3306 #   pragma intrinsic(_umul128)
3307 #endif
3308     xxh_u64 product_high;
3309     xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
3310     XXH128_hash_t r128;
3311     r128.low64  = product_low;
3312     r128.high64 = product_high;
3313     return r128;
3314 
3315     /*
3316      * MSVC for ARM64's __umulh method.
3317      *
3318      * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
3319      */
3320 #elif defined(_M_ARM64)
3321 
3322 #ifndef _MSC_VER
3323 #   pragma intrinsic(__umulh)
3324 #endif
3325     XXH128_hash_t r128;
3326     r128.low64  = lhs * rhs;
3327     r128.high64 = __umulh(lhs, rhs);
3328     return r128;
3329 
3330 #else
3331     /*
3332      * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
3333      *
3334      * This is a fast and simple grade school multiply, which is shown below
3335      * with base 10 arithmetic instead of base 0x100000000.
3336      *
3337      *           9 3 // D2 lhs = 93
3338      *         x 7 5 // D2 rhs = 75
3339      *     ----------
3340      *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
3341      *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
3342      *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
3343      *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
3344      *     ---------
3345      *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
3346      *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
3347      *     ---------
3348      *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
3349      *
3350      * The reasons for adding the products like this are:
3351      *  1. It avoids manual carry tracking. Just like how
3352      *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
3353      *     This avoids a lot of complexity.
3354      *
3355      *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
3356      *     instruction available in ARM's Digital Signal Processing extension
3357      *     in 32-bit ARMv6 and later, which is shown below:
3358      *
3359      *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
3360      *         {
3361      *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
3362      *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
3363      *             *RdHi = (xxh_u32)(product >> 32);
3364      *         }
3365      *
3366      *     This instruction was designed for efficient long multiplication, and
3367      *     allows this to be calculated in only 4 instructions at speeds
3368      *     comparable to some 64-bit ALUs.
3369      *
3370      *  3. It isn't terrible on other platforms. Usually this will be a couple
3371      *     of 32-bit ADD/ADCs.
3372      */
3373 
3374     /* First calculate all of the cross products. */
3375     xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3376     xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
3377     xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3378     xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);
3379 
3380     /* Now add the products together. These will never overflow. */
3381     xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3382     xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
3383     xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3384 
3385     XXH128_hash_t r128;
3386     r128.low64  = lower;
3387     r128.high64 = upper;
3388     return r128;
3389 #endif
3390 }
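/*
 * Sanity-check sketch (illustration only): since (2^64 - 1)^2
 * == 2^128 - 2^65 + 1, the extreme case works out as follows:
 *
 *   XXH128_hash_t const p = XXH_mult64to128(0xFFFFFFFFFFFFFFFFULL,
 *                                           0xFFFFFFFFFFFFFFFFULL);
 *   // p.high64 == 0xFFFFFFFFFFFFFFFEULL, p.low64 == 0x0000000000000001ULL
 */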
3391 
3392 /*!
3393  * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
3394  *
3395  * The reason for the separate function is to prevent passing too many structs
3396  * around by value. This will hopefully inline the multiply, but we don't force it.
3397  *
3398  * @param lhs , rhs The 64-bit integers to multiply
3399  * @return The low 64 bits of the product XOR'd by the high 64 bits.
3400  * @see XXH_mult64to128()
3401  */
3402 static xxh_u64
3403 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3404 {
3405     XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3406     return product.low64 ^ product.high64;
3407 }
3408 
3409 /*! Seems to produce slightly better code on GCC for some reason. */
3410 XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
3411 {
3412     XXH_ASSERT(0 <= shift && shift < 64);
3413     return v64 ^ (v64 >> shift);
3414 }
3415 
3416 /*
3417  * This is a fast avalanche stage,
3418  * suitable when input bits are already partially mixed
3419  */
3420 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
3421 {
3422     h64 = XXH_xorshift64(h64, 37);
3423     h64 *= 0x165667919E3779F9ULL;
3424     h64 = XXH_xorshift64(h64, 32);
3425     return h64;
3426 }
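/*
 * Note (illustration): XXH3_avalanche(0) == 0, as both the xorshift and the
 * multiply map zero to zero. This stage therefore expects input bits that
 * are already partially mixed, as stated above.
 */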
3427 
3428 /*
3429  * This is a stronger avalanche,
3430  * inspired by Pelle Evensen's rrmxmx
3431  * preferable when input has not been previously mixed
3432  */
3433 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
3434 {
3435     /* this mix is inspired by Pelle Evensen's rrmxmx */
3436     h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
3437     h64 *= 0x9FB21C651E98DF25ULL;
3438     h64 ^= (h64 >> 35) + len;
3439     h64 *= 0x9FB21C651E98DF25ULL;
3440     return XXH_xorshift64(h64, 28);
3441 }
3442 
3443 
3444 /* ==========================================
3445  * Short keys
3446  * ==========================================
3447  * One of the shortcomings of XXH32 and XXH64 was that their performance was
3448  * sub-optimal on short lengths. They used an iterative algorithm which strongly
3449  * favored lengths that were a multiple of 4 or 8.
3450  *
3451  * Instead of iterating over individual inputs, we use a set of single-shot
3452  * functions which piece together a range of lengths and operate in constant time.
3453  *
3454  * Additionally, the number of multiplies has been significantly reduced. This
3455  * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
3456  *
3457  * Depending on the platform, this may or may not be faster than XXH32, but it
3458  * is almost guaranteed to be faster than XXH64.
3459  */
3460 
3461 /*
3462  * At very short lengths, there isn't enough input to fully hide secrets, or use
3463  * the entire secret.
3464  *
3465  * There is also only a limited amount of mixing we can do before significantly
3466  * impacting performance.
3467  *
3468  * Therefore, we use different sections of the secret and always mix two secret
3469  * samples with an XOR. This should have no effect on performance on the
3470  * seedless or withSeed variants because everything _should_ be constant folded
3471  * by modern compilers.
3472  *
3473  * The XOR mixing hides individual parts of the secret and increases entropy.
3474  *
3475  * This adds an extra layer of strength for custom secrets.
3476  */
3477 XXH_FORCE_INLINE XXH64_hash_t
3478 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3479 {
3480     XXH_ASSERT(input != NULL);
3481     XXH_ASSERT(1 <= len && len <= 3);
3482     XXH_ASSERT(secret != NULL);
3483     /*
3484      * len = 1: combined = { input[0], 0x01, input[0], input[0] }
3485      * len = 2: combined = { input[1], 0x02, input[0], input[1] }
3486      * len = 3: combined = { input[2], 0x03, input[0], input[1] }
3487      */
3488     {   xxh_u8  const c1 = input[0];
3489         xxh_u8  const c2 = input[len >> 1];
3490         xxh_u8  const c3 = input[len - 1];
3491         xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
3492                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
3493         xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
3494         xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
3495         return XXH64_avalanche(keyed);
3496     }
3497 }
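/*
 * Worked example (illustration only): for input = "A" (len = 1),
 * c1 == c2 == c3 == 0x41, so
 *
 *   combined = (0x41 << 16) | (0x41 << 24) | (0x41 << 0) | (1 << 8)
 *            = 0x41410141
 *
 * whose little-endian bytes are { 0x41, 0x01, 0x41, 0x41 }, matching the
 * documented layout { input[0], 0x01, input[0], input[0] }.
 */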
3498 
3499 XXH_FORCE_INLINE XXH64_hash_t
3500 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3501 {
3502     XXH_ASSERT(input != NULL);
3503     XXH_ASSERT(secret != NULL);
3504     XXH_ASSERT(4 <= len && len <= 8);
3505     seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3506     {   xxh_u32 const input1 = XXH_readLE32(input);
3507         xxh_u32 const input2 = XXH_readLE32(input + len - 4);
3508         xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
3509         xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
3510         xxh_u64 const keyed = input64 ^ bitflip;
3511         return XXH3_rrmxmx(keyed, len);
3512     }
3513 }
3514 
3515 XXH_FORCE_INLINE XXH64_hash_t
3516 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3517 {
3518     XXH_ASSERT(input != NULL);
3519     XXH_ASSERT(secret != NULL);
3520     XXH_ASSERT(9 <= len && len <= 16);
3521     {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3522         xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3523         xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
3524         xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
3525         xxh_u64 const acc = len
3526                           + XXH_swap64(input_lo) + input_hi
3527                           + XXH3_mul128_fold64(input_lo, input_hi);
3528         return XXH3_avalanche(acc);
3529     }
3530 }
3531 
3532 XXH_FORCE_INLINE XXH64_hash_t
3533 XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3534 {
3535     XXH_ASSERT(len <= 16);
3536     {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
3537         if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
3538         if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
3539         return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
3540     }
3541 }
3542 
3543 /*
3544  * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
3545  * multiplication by zero, affecting hashes of lengths 17 to 240.
3546  *
3547  * However, they are very unlikely.
3548  *
3549  * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
3550  * unseeded non-cryptographic hashes, it does not attempt to defend itself
3551  * against specially crafted inputs, only random inputs.
3552  *
3553  * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
3554  * cancelling out the secret is taken an arbitrary number of times (addressed
3555  * in XXH3_accumulate_512), this collision is very unlikely with random inputs
3556  * and/or proper seeding:
3557  *
3558  * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
3559  * function that is only called up to 16 times per hash with up to 240 bytes of
3560  * input.
3561  *
3562  * This is not too bad for a non-cryptographic hash function, especially with
3563  * only 64 bit outputs.
3564  *
3565  * The 128-bit variant (which trades some speed for strength) is NOT affected
3566  * by this, although it is always a good idea to use a proper seed if you care
3567  * about strength.
3568  */
3569 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
3570                                      const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
3571 {
3572 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3573   && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
3574   && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
3575     /*
3576      * UGLY HACK:
3577      * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
3578      * slower code.
3579      *
3580      * By forcing seed64 into a register, we disrupt the cost model and
3581      * cause it to scalarize. See `XXH32_round()`
3582      *
3583      * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
3584      * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
3585      * GCC 9.2, despite both emitting scalar code.
3586      *
3587      * GCC generates much better scalar code than Clang for the rest of XXH3,
3588  * which is why finding a more optimal codepath is of interest.
3589      */
3590     XXH_COMPILER_GUARD(seed64);
3591 #endif
3592     {   xxh_u64 const input_lo = XXH_readLE64(input);
3593         xxh_u64 const input_hi = XXH_readLE64(input+8);
3594         return XXH3_mul128_fold64(
3595             input_lo ^ (XXH_readLE64(secret)   + seed64),
3596             input_hi ^ (XXH_readLE64(secret+8) - seed64)
3597         );
3598     }
3599 }
3600 
3601 /* For mid-range keys, XXH3 uses a Mum-hash variant. */
3602 XXH_FORCE_INLINE XXH64_hash_t
3603 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3604                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3605                      XXH64_hash_t seed)
3606 {
3607     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3608     XXH_ASSERT(16 < len && len <= 128);
3609 
3610     {   xxh_u64 acc = len * XXH_PRIME64_1;
3611         if (len > 32) {
3612             if (len > 64) {
3613                 if (len > 96) {
3614                     acc += XXH3_mix16B(input+48, secret+96, seed);
3615                     acc += XXH3_mix16B(input+len-64, secret+112, seed);
3616                 }
3617                 acc += XXH3_mix16B(input+32, secret+64, seed);
3618                 acc += XXH3_mix16B(input+len-48, secret+80, seed);
3619             }
3620             acc += XXH3_mix16B(input+16, secret+32, seed);
3621             acc += XXH3_mix16B(input+len-32, secret+48, seed);
3622         }
3623         acc += XXH3_mix16B(input+0, secret+0, seed);
3624         acc += XXH3_mix16B(input+len-16, secret+16, seed);
3625 
3626         return XXH3_avalanche(acc);
3627     }
3628 }
3629 
3630 #define XXH3_MIDSIZE_MAX 240
3631 
3632 XXH_NO_INLINE XXH64_hash_t
3633 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3634                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3635                       XXH64_hash_t seed)
3636 {
3637     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3638     XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
3639 
3640     #define XXH3_MIDSIZE_STARTOFFSET 3
3641     #define XXH3_MIDSIZE_LASTOFFSET  17
3642 
3643     {   xxh_u64 acc = len * XXH_PRIME64_1;
3644         int const nbRounds = (int)len / 16;
3645         int i;
3646         for (i=0; i<8; i++) {
3647             acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
3648         }
3649         acc = XXH3_avalanche(acc);
3650         XXH_ASSERT(nbRounds >= 8);
3651 #if defined(__clang__)                                /* Clang */ \
3652     && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
3653     && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
3654         /*
3655          * UGLY HACK:
3656          * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
3657          * Everywhere else, it uses scalar code.
3658          *
3659          * For 64->128-bit multiplies, even if the NEON code were 100% optimal, it
3660          * would still be slower than UMAAL (see XXH_mult64to128).
3661          *
3662          * Unfortunately, Clang doesn't handle the long multiplies properly and
3663          * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
3664          * scalarized into an ugly mess of VMOV.32 instructions.
3665          *
3666          * This mess is difficult to avoid without turning autovectorization
3667          * off completely, but such cases are usually relatively minor and/or not
3668          * worth the effort to fix.
3669          *
3670          * This loop is the easiest to fix, as unlike XXH32, this pragma
3671          * _actually works_ because it is a loop vectorization instead of an
3672          * SLP vectorization.
3673          */
3674         #pragma clang loop vectorize(disable)
3675 #endif
3676         for (i=8 ; i < nbRounds; i++) {
3677             acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
3678         }
3679         /* last bytes */
3680         acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
3681         return XXH3_avalanche(acc);
3682     }
3683 }
3684 
3685 
3686 /* =======     Long Keys     ======= */
3687 
3688 #define XXH_STRIPE_LEN 64
3689 #define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
3690 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
3691 
3692 #ifdef XXH_OLD_NAMES
3693 #  define STRIPE_LEN XXH_STRIPE_LEN
3694 #  define ACC_NB XXH_ACC_NB
3695 #endif
3696 
3697 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
3698 {
3699     if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
3700     XXH_memcpy(dst, &v64, sizeof(v64));
3701 }
3702 
3703 /* Several intrinsic functions below are supposed to accept __int64 as argument,
3704  * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
3705  * However, several environments do not define the __int64 type,
3706  * requiring a workaround.
3707  */
3708 #if !defined (__VMS) \
3709   && (defined (__cplusplus) \
3710   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
3711     typedef int64_t xxh_i64;
3712 #else
3713     /* the following type must have a width of 64-bit */
3714     typedef long long xxh_i64;
3715 #endif
3716 
3717 /*
3718  * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
3719  *
3720  * It is a hardened version of UMAC, based on FARSH's implementation.
3721  *
3722  * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
3723  * implementations, and it is ridiculously fast.
3724  *
3725  * We harden it by mixing the original input to the accumulators as well as the product.
3726  *
3727  * This means that in the (relatively likely) case of a multiply by zero, the
3728  * original input is preserved.
3729  *
3730  * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
3731  * cross-pollination, as otherwise the upper and lower halves would be
3732  * essentially independent.
3733  *
3734  * This doesn't matter on 64-bit hashes since they all get merged together in
3735  * the end, so we skip the extra step.
3736  *
3737  * Both XXH3_64bits and XXH3_128bits use this subroutine.
3738  */
3739 
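/*
 * Reference sketch of the per-lane operation (illustration only; the names
 * are hypothetical and the real scalar path lives elsewhere in this file).
 * For each of the XXH_ACC_NB 64-bit lanes:
 *
 *   static void XXH3_accumulate_lane_sketch(xxh_u64* acc,
 *                                           const xxh_u8* input,
 *                                           const xxh_u8* secret, size_t lane)
 *   {
 *       xxh_u64 const data_val = XXH_readLE64(input + 8*lane);
 *       xxh_u64 const data_key = data_val ^ XXH_readLE64(secret + 8*lane);
 *       acc[lane ^ 1] += data_val;  // swap adjacent lanes (cross-pollination)
 *       acc[lane]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 *   }
 */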
3740 #if (XXH_VECTOR == XXH_AVX512) \
3741      || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
3742 
3743 #ifndef XXH_TARGET_AVX512
3744 # define XXH_TARGET_AVX512  /* disable attribute target */
3745 #endif
3746 
3747 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3748 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
3749                      const void* XXH_RESTRICT input,
3750                      const void* XXH_RESTRICT secret)
3751 {
3752     __m512i* const xacc = (__m512i *) acc;
3753     XXH_ASSERT((((size_t)acc) & 63) == 0);
3754     XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3755 
3756     {
3757         /* data_vec    = input[0]; */
3758         __m512i const data_vec    = _mm512_loadu_si512   (input);
3759         /* key_vec     = secret[0]; */
3760         __m512i const key_vec     = _mm512_loadu_si512   (secret);
3761         /* data_key    = data_vec ^ key_vec; */
3762         __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
3763         /* data_key_lo = data_key >> 32; */
3764         __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3765         /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3766         __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
3767         /* xacc[0] += swap(data_vec); */
3768         __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
3769         __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
3770         /* xacc[0] += product; */
3771         *xacc = _mm512_add_epi64(product, sum);
3772     }
3773 }
3774 
3775 /*
3776  * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
3777  *
3778  * Multiplication isn't perfect, as explained by Google in HighwayHash:
3779  *
3780  *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
3781  *  // varying degrees. In descending order of goodness, bytes
3782  *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
3783  *  // As expected, the upper and lower bytes are much worse.
3784  *
3785  * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
3786  *
3787  * Since our algorithm uses a pseudorandom secret to add some variance into the
3788  * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
3789  *
3790  * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
3791  * extraction.
3792  *
3793  * Both XXH3_64bits and XXH3_128bits use this subroutine.
3794  */
3795 
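/*
 * Reference sketch of the per-lane scramble (illustration only; the name is
 * hypothetical): xorshift by 47, XOR in the secret, multiply by a prime.
 *
 *   static void XXH3_scramble_lane_sketch(xxh_u64* acc,
 *                                         const xxh_u8* secret, size_t lane)
 *   {
 *       xxh_u64 acc64 = acc[lane];
 *       acc64 = XXH_xorshift64(acc64, 47);
 *       acc64 ^= XXH_readLE64(secret + 8*lane);
 *       acc64 *= XXH_PRIME32_1;
 *       acc[lane] = acc64;
 *   }
 */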
3796 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3797 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3798 {
3799     XXH_ASSERT((((size_t)acc) & 63) == 0);
3800     XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3801     {   __m512i* const xacc = (__m512i*) acc;
3802         const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
3803 
3804         /* xacc[0] ^= (xacc[0] >> 47) */
3805         __m512i const acc_vec     = *xacc;
3806         __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
3807         __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
3808         /* xacc[0] ^= secret; */
3809         __m512i const key_vec     = _mm512_loadu_si512   (secret);
3810         __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
3811 
3812         /* xacc[0] *= XXH_PRIME32_1; */
3813         __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3814         __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
3815         __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
3816         *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
3817     }
3818 }
3819 
3820 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3821 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3822 {
3823     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
3824     XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
3825     XXH_ASSERT(((size_t)customSecret & 63) == 0);
3826     (void)(&XXH_writeLE64);
3827     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
3828         __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
3829 
3830         const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
3831               __m512i* const dest = (      __m512i*) customSecret;
3832         int i;
3833         XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
3834         XXH_ASSERT(((size_t)dest & 63) == 0);
3835         for (i=0; i < nbRounds; ++i) {
3836             /* GCC has a bug: _mm512_stream_load_si512 accepts 'void*', not 'void const*',
3837              * so passing 'src' directly would warn "discards 'const' qualifier". */
3838             union {
3839                 const __m512i* cp;
3840                 void* p;
3841             } remote_const_void;
3842             remote_const_void.cp = src + i;
3843             dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
3844     }   }
3845 }
3846 
3847 #endif
3848 
3849 #if (XXH_VECTOR == XXH_AVX2) \
3850     || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3851 
3852 #ifndef XXH_TARGET_AVX2
3853 # define XXH_TARGET_AVX2  /* disable attribute target */
3854 #endif
3855 
3856 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3857 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
3858                     const void* XXH_RESTRICT input,
3859                     const void* XXH_RESTRICT secret)
3860 {
3861     XXH_ASSERT((((size_t)acc) & 31) == 0);
3862     {   __m256i* const xacc    =       (__m256i *) acc;
3863         /* Unaligned. This is mainly for pointer arithmetic, and because
3864          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3865         const         __m256i* const xinput  = (const __m256i *) input;
3866         /* Unaligned. This is mainly for pointer arithmetic, and because
3867          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3868         const         __m256i* const xsecret = (const __m256i *) secret;
3869 
3870         size_t i;
3871         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3872             /* data_vec    = xinput[i]; */
3873             __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
3874             /* key_vec     = xsecret[i]; */
3875             __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
3876             /* data_key    = data_vec ^ key_vec; */
3877             __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
3878             /* data_key_lo = data_key >> 32; */
3879             __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3880             /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3881             __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
3882             /* xacc[i] += swap(data_vec); */
3883             __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3884             __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
3885             /* xacc[i] += product; */
3886             xacc[i] = _mm256_add_epi64(product, sum);
3887     }   }
3888 }
3889 
3890 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3891 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3892 {
3893     XXH_ASSERT((((size_t)acc) & 31) == 0);
3894     {   __m256i* const xacc = (__m256i*) acc;
3895         /* Unaligned. This is mainly for pointer arithmetic, and because
3896          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3897         const         __m256i* const xsecret = (const __m256i *) secret;
3898         const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
3899 
3900         size_t i;
3901         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3902             /* xacc[i] ^= (xacc[i] >> 47) */
3903             __m256i const acc_vec     = xacc[i];
3904             __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
3905             __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
3906             /* xacc[i] ^= xsecret; */
3907             __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
3908             __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
3909 
3910             /* xacc[i] *= XXH_PRIME32_1; */
3911             __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3912             __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
3913             __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
3914             xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
3915         }
3916     }
3917 }
3918 
3919 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3920 {
3921     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
3922     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
3923     XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
3924     (void)(&XXH_writeLE64);
3925     XXH_PREFETCH(customSecret);
3926     {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
3927 
3928         const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
3929               __m256i*       dest = (      __m256i*) customSecret;
3930 
3931 #       if defined(__GNUC__) || defined(__clang__)
3932         /*
3933          * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
3934          *   - not extract the secret from SSE registers in the internal loop
3935          *   - use fewer registers, and avoid pushing these registers onto the stack
3936          */
3937         XXH_COMPILER_GUARD(dest);
3938 #       endif
3939         XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
3940         XXH_ASSERT(((size_t)dest & 31) == 0);
3941 
3942         /* GCC -O2 needs the loop unrolled manually */
3943         dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
3944         dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
3945         dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
3946         dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
3947         dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
3948         dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
3949     }
3950 }
3951 
3952 #endif
3953 
3954 /* x86dispatch always generates SSE2 */
3955 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3956 
3957 #ifndef XXH_TARGET_SSE2
3958 # define XXH_TARGET_SSE2  /* disable attribute target */
3959 #endif
3960 
3961 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3962 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
3963                     const void* XXH_RESTRICT input,
3964                     const void* XXH_RESTRICT secret)
3965 {
3966     /* SSE2 is just a half-scale version of the AVX2 version. */
3967     XXH_ASSERT((((size_t)acc) & 15) == 0);
3968     {   __m128i* const xacc    =       (__m128i *) acc;
3969         /* Unaligned. This is mainly for pointer arithmetic, and because
3970          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3971         const         __m128i* const xinput  = (const __m128i *) input;
3972         /* Unaligned. This is mainly for pointer arithmetic, and because
3973          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3974         const         __m128i* const xsecret = (const __m128i *) secret;
3975 
3976         size_t i;
3977         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3978             /* data_vec    = xinput[i]; */
3979             __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
3980             /* key_vec     = xsecret[i]; */
3981             __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
3982             /* data_key    = data_vec ^ key_vec; */
3983             __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
3984             /* data_key_lo = data_key >> 32; */
3985             __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3986             /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3987             __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
3988             /* xacc[i] += swap(data_vec); */
3989             __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
3990             __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
3991             /* xacc[i] += product; */
3992             xacc[i] = _mm_add_epi64(product, sum);
3993     }   }
3994 }
3995 
3996 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3997 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3998 {
3999     XXH_ASSERT((((size_t)acc) & 15) == 0);
4000     {   __m128i* const xacc = (__m128i*) acc;
4001         /* Unaligned. This is mainly for pointer arithmetic, and because
4002          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
4003         const         __m128i* const xsecret = (const __m128i *) secret;
4004         const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
4005 
4006         size_t i;
4007         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
4008             /* xacc[i] ^= (xacc[i] >> 47) */
4009             __m128i const acc_vec     = xacc[i];
4010             __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
4011             __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
4012             /* xacc[i] ^= xsecret[i]; */
4013             __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
4014             __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
4015 
4016             /* xacc[i] *= XXH_PRIME32_1; */
4017             __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4018             __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
4019             __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
4020             xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
4021         }
4022     }
4023 }
4024 
4025 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4026 {
4027     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4028     (void)(&XXH_writeLE64);
4029     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
4030 
4031 #       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4032         /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
4033         XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
4034         __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
4035 #       else
4036         __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
4037 #       endif
4038         int i;
4039 
4040         const void* const src16 = XXH3_kSecret;
4041         __m128i* dst16 = (__m128i*) customSecret;
4042 #       if defined(__GNUC__) || defined(__clang__)
4043         /*
4044          * On GCC & Clang, marking 'dest' as modified will cause the compiler:
4045          *   - do not extract the secret from sse registers in the internal loop
4046          *   - use less common registers, and avoid pushing these reg into stack
4047          */
4048         XXH_COMPILER_GUARD(dst16);
4049 #       endif
4050         XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
4051         XXH_ASSERT(((size_t)dst16 & 15) == 0);
4052 
4053         for (i=0; i < nbRounds; ++i) {
4054             dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
4055     }   }
4056 }
4057 
4058 #endif
4059 
#if (XXH_VECTOR == XXH_NEON)

XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {
        uint64x2_t* const xacc = (uint64x2_t *) acc;
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
        uint8_t const* const xinput = (const uint8_t *) input;
        uint8_t const* const xsecret  = (const uint8_t *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
            /* data_vec = xinput[i]; */
            uint8x16_t data_vec    = vld1q_u8(xinput  + (i * 16));
            /* key_vec  = xsecret[i];  */
            uint8x16_t key_vec     = vld1q_u8(xsecret + (i * 16));
            uint64x2_t data_key;
            uint32x2_t data_key_lo, data_key_hi;
            /* xacc[i] += swap(data_vec); */
            uint64x2_t const data64  = vreinterpretq_u64_u8(data_vec);
            uint64x2_t const swapped = vextq_u64(data64, data64, 1);
            xacc[i] = vaddq_u64 (xacc[i], swapped);
            /* data_key = data_vec ^ key_vec; */
            data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
            /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (data_key >> 32);
             * data_key = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
            xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);

        }
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   uint64x2_t* xacc       = (uint64x2_t*) acc;
        uint8_t const* xsecret = (uint8_t const*) secret;
        uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            uint64x2_t acc_vec  = xacc[i];
            uint64x2_t shifted  = vshrq_n_u64 (acc_vec, 47);
            uint64x2_t data_vec = veorq_u64   (acc_vec, shifted);

            /* xacc[i] ^= xsecret[i]; */
            uint8x16_t key_vec  = vld1q_u8    (xsecret + (i * 16));
            uint64x2_t data_key = veorq_u64   (data_vec, vreinterpretq_u64_u8(key_vec));

            /* xacc[i] *= XXH_PRIME32_1 */
            uint32x2_t data_key_lo, data_key_hi;
            /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
             * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
             * xacc[i] = UNDEFINED; */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            {   /*
                 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
                 *
                 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
                 * incorrectly "optimize" this:
                 *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
                 *   shifted = vshll_n_u32(tmp, 32);
                 * to this:
                 *   tmp     = "vmulq_u64"(a, b); // no such thing!
                 *   shifted = vshlq_n_u64(tmp, 32);
                 *
                 * However, unlike SSE, Clang lacks a 64-bit multiply routine
                 * for NEON, and it scalarizes two 64-bit multiplies instead.
                 *
                 * vmull_u32 has the same timing as vmul_u32, and it avoids
                 * this bug completely.
                 * See https://bugs.llvm.org/show_bug.cgi?id=39967
                 */
                uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
                /* xacc[i] = prod_hi << 32; */
                xacc[i] = vshlq_n_u64(prod_hi, 32);
                /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
                xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
            }
    }   }
}

#endif

#if (XXH_VECTOR == XXH_VSX)

XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* presumed aligned */
    unsigned int* const xacc = (unsigned int*) acc;
    xxh_u64x2 const* const xinput   = (xxh_u64x2 const*) input;   /* no alignment restriction */
    xxh_u64x2 const* const xsecret  = (xxh_u64x2 const*) secret;    /* no alignment restriction */
    xxh_u64x2 const v32 = { 32, 32 };
    size_t i;
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
        /* data_vec = xinput[i]; */
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
        /* key_vec = xsecret[i]; */
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
        xxh_u64x2 const data_key = data_vec ^ key_vec;
        /* shuffled = (data_key << 32) | (data_key >> 32); */
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
        /* acc_vec = xacc[i]; */
        xxh_u64x2 acc_vec        = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
        acc_vec += product;

        /* swap high and low halves */
#ifdef __s390x__
        acc_vec += vec_permi(data_vec, data_vec, 2);
#else
        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
        /* xacc[i] = acc_vec; */
        vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {         xxh_u64x2* const xacc    =       (xxh_u64x2*) acc;
        const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
        /* constants */
        xxh_u64x2 const v32  = { 32, 32 };
        xxh_u64x2 const v47 = { 47, 47 };
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
        size_t i;
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            xxh_u64x2 const acc_vec  = xacc[i];
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);

            /* xacc[i] ^= xsecret[i]; */
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
            xxh_u64x2 const data_key = data_vec ^ key_vec;

            /* xacc[i] *= XXH_PRIME32_1 */
            /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF);  */
            xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
            /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
            xacc[i] = prod_odd + (prod_even << v32);
    }   }
}

#endif

/* scalar variants - universal */

XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
    const xxh_u8* const xinput  = (const xxh_u8*) input;  /* no alignment restriction */
    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
    size_t i;
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
        xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
        xacc[i ^ 1] += data_val; /* swap adjacent lanes */
        xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
    }
}

XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
    size_t i;
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
        xxh_u64 acc64 = xacc[i];
        acc64 = XXH_xorshift64(acc64, 47);
        acc64 ^= key64;
        acc64 *= XXH_PRIME32_1;
        xacc[i] = acc64;
    }
}

XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * We need a separate pointer for the hack below,
     * which requires a non-const pointer.
     * Any decent compiler will optimize this out otherwise.
     */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

#if defined(__clang__) && defined(__aarch64__)
    /*
     * UGLY HACK:
     * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
     * placed sequentially, in order, at the top of the unrolled loop.
     *
     * While MOVK is great for generating constants (2 cycles for a 64-bit
     * constant compared to 4 cycles for LDR), long MOVK chains stall the
     * integer pipelines:
     *   I   L   S
     * MOVK
     * MOVK
     * MOVK
     * MOVK
     * ADD
     * SUB      STR
     *          STR
     * By forcing loads from memory (as the asm line causes Clang to assume
     * that kSecretPtr has been changed), the pipelines are used more
     * efficiently:
     *   I   L   S
     *      LDR
     *  ADD LDR
     *  SUB     STR
     *          STR
     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
     *   without hack: 2654.4 MB/s
     *   with hack:    3202.9 MB/s
     */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    /*
     * Note: in debug mode, this overrides the asm optimization
     * and Clang will emit MOVK chains again.
     */
    XXH_ASSERT(kSecretPtr == XXH3_kSecret);

    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /*
             * The asm hack causes Clang to assume that kSecretPtr aliases with
             * customSecret, and on aarch64, this prevented LDP from merging two
             * loads together for free. Putting the loads together before the stores
             * properly generates LDP.
             */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}


typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);


#if (XXH_VECTOR == XXH_AVX512)

#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512

#elif (XXH_VECTOR == XXH_AVX2)

#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2

#elif (XXH_VECTOR == XXH_SSE2)

#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2

#elif (XXH_VECTOR == XXH_NEON)

#define XXH3_accumulate_512 XXH3_accumulate_512_neon
#define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_VSX)

#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
#define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#else /* scalar */

#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#endif
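
/*
 * Editor's note (illustrative, not part of the upstream logic): the dispatch
 * above is resolved entirely at compile time, so a build can pin a specific
 * code path by defining XXH_VECTOR before this header is processed.
 * A minimal sketch, assuming the scalar path is desired:
 *
 *   #define XXH_VECTOR XXH_SCALAR
 *   #include "xxhash.h"
 *
 * or, equivalently, on the compiler command line: cc -DXXH_VECTOR=XXH_SCALAR ...
 */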



#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif  /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */

/*
 * XXH3_accumulate()
 * Loops over XXH3_accumulate_512().
 * Assumption: nbStripes will not overflow the secret size
 */
XXH_FORCE_INLINE void
XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
                const xxh_u8* XXH_RESTRICT input,
                const xxh_u8* XXH_RESTRICT secret,
                      size_t nbStripes,
                      XXH3_f_accumulate_512 f_acc512)
{
    size_t n;
    for (n = 0; n < nbStripes; n++ ) {
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);
        f_acc512(acc,
                 in,
                 secret + n*XXH_SECRET_CONSUME_RATE);
    }
}

XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                      const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    size_t const nb_blocks = (len - 1) / block_len;

    size_t n;

    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);

    for (n = 0; n < nb_blocks; n++) {
        XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }

    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);

        /* last stripe */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
            f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
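
/*
 * Editor's note, a worked example of the block arithmetic above (illustrative,
 * assuming the default constants XXH_SECRET_DEFAULT_SIZE == 192,
 * XXH_STRIPE_LEN == 64 and XXH_SECRET_CONSUME_RATE == 8):
 *   nbStripesPerBlock = (192 - 64) / 8 = 16
 *   block_len         = 64 * 16      = 1024
 * so a 5000-byte input yields nb_blocks = (5000-1)/1024 = 4 full blocks (each
 * followed by a scramble), then 4999 - 4096 = 903 remaining bytes: 14 full
 * stripes, plus the final, possibly overlapping "last stripe" read from
 * input + len - XXH_STRIPE_LEN.
 */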

XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
    return XXH3_mul128_fold64(
               acc[0] ^ XXH_readLE64(secret),
               acc[1] ^ XXH_readLE64(secret+8) );
}

static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i = 0;

    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
         * XXH3_64bits, len == 256, Snapdragon 835:
         *   without hack: 2063.7 MB/s
         *   with hack:    2560.7 MB/s
         */
        XXH_COMPILER_GUARD(result64);
#endif
    }

    return XXH3_avalanche(result64);
}

#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }

XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate_512 f_acc512,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}

/*
 * It's important for performance to transmit the secret's size (when it's static)
 * so that the compiler can properly optimize the vectorized loop.
 * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
 */
XXH3_WITH_SECRET_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's preferable for performance that XXH3_hashLong is not inlined,
 * as it results in a smaller function for small data, easier on the instruction cache.
 * Note that inside this no_inline function, we do inline the internal loop,
 * and provide a statically defined secret size to allow optimization of the vector loop.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * XXH3_hashLong_64b_withSeed():
 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
 * and then use this key for long mode hashing.
 *
 * This operation is decently fast but nonetheless costs a little bit of time.
 * Try to avoid it whenever possible (typically when seed==0).
 *
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
 * why (uop cache maybe?), but the difference is large and easily measurable.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate_512 f_acc512,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}


typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken when the `secretLen` condition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that the function signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}


/* ===   Public entry point   === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
{
    return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}

XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
}
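
/*
 * Editor's note, a usage sketch for the one-shot entry points above
 * (illustrative only):
 *
 *   const char msg[] = "hello world";
 *   XXH64_hash_t h1 = XXH3_64bits(msg, sizeof(msg) - 1);
 *   XXH64_hash_t h2 = XXH3_64bits_withSeed(msg, sizeof(msg) - 1, 42);
 *
 * A custom secret must be at least XXH3_SECRET_SIZE_MIN bytes:
 *
 *   unsigned char secret[192];  // assumed filled with high-entropy bytes
 *   XXH64_hash_t h3 = XXH3_64bits_withSecret(msg, sizeof(msg) - 1,
 *                                            secret, sizeof(secret));
 */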


/* ===   XXH3 streaming   === */

/*
 * Malloc's a pointer that is always aligned to align.
 *
 * This must be freed with `XXH_alignedFree()`.
 *
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
 * or, on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
 *
 * This underalignment previously caused a rather obvious crash which went
 * completely unnoticed due to XXH3_createState() not actually being tested.
 * Credit to RedSpah for noticing this bug.
 *
 * The alignment is done manually: Functions like posix_memalign or _mm_malloc
 * are avoided: To maintain portability, we would have to write a fallback
 * like this anyways, and besides, testing for the existence of library
 * functions without relying on external build tools is impossible.
 *
 * The method is simple: Overallocate, manually align, and store the offset
 * to the original behind the returned pointer.
 *
 * Align must be a power of 2 and 8 <= align <= 128.
 */
static void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             *
             * Even if the returned pointer is aligned, there will always be
             * at least one byte to store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
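/*
 * Editor's note, a worked example of the alignment arithmetic above
 * (illustrative, with a hypothetical base address): for align == 64 and
 * base == 0x1007,
 *   offset = 64 - (0x1007 & 63) = 64 - 7 = 57
 *   ptr    = 0x1007 + 57 = 0x1040   (64-byte aligned)
 * and ptr[-1] stores 57 so that XXH_alignedFree() can recover base.
 * When base is already aligned, offset == align, which still leaves room
 * in front of ptr for the offset byte.
 */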
/*
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
 */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}

static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}

/* Note : when XXH3_consumeStripes() is invoked,
 * there must be a guarantee that at least one more byte will be consumed from input afterwards,
 * so that the function can blindly consume all stripes using the "normal" secret segment */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate_512 f_acc512,
                    XXH3_f_scrambleAcc f_scramble)
{
    XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
        /* need a scrambling operation */
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
        f_scramble(acc, secret + secretLimit);
        XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
        *nbStripesSoFarPtr = nbStripesAfterBlock;
    } else {
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
        *nbStripesSoFarPtr += nbStripes;
    }
}
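
/*
 * Editor's note, a worked example for XXH3_consumeStripes() (illustrative,
 * with nbStripesPerBlock == 16): if *nbStripesSoFarPtr == 14 and
 * nbStripes == 4, the block boundary is crossed, so the function accumulates
 * 2 stripes to finish the block, scrambles, then accumulates the remaining
 * 2 stripes from the start of the secret, leaving *nbStripesSoFarPtr == 2.
 * If instead nbStripes == 1, it simply accumulates 1 stripe and advances the
 * counter to 15.
 */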

#ifndef XXH3_STREAM_USE_STACK
# ifndef __clang__ /* clang doesn't need additional stack space */
#   define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* For some reason, gcc and MSVC seem to suffer greatly
         * when operating accumulators directly into state.
         * Operating into stack space seems to enable proper optimization.
         * clang, on the other hand, doesn't seem to need this trick */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
        #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);

        /* large input to consume : ingest per full block */
        if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
            /* join to current block's end */
            {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
                XXH_ASSERT(nbStripesToEnd <= nbStripes);
                XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                state->nbStripesSoFar = 0;
                input += nbStripesToEnd * XXH_STRIPE_LEN;
                nbStripes -= nbStripesToEnd;
            }
            /* consume per entire blocks */
            while(nbStripes >= state->nbStripesPerBlock) {
                XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
                nbStripes -= state->nbStripesPerBlock;
            }
            /* consume last partial block */
            XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
            input += nbStripes * XXH_STRIPE_LEN;
            XXH_ASSERT(input < bEnd);  /* at least some bytes left */
            state->nbStripesSoFar = nbStripes;
            /* buffer predecessor of last partial stripe */
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
        } else {
            /* content to consume <= block size */
            /* Consume input by a multiple of internal buffer size */
            if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
                const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
                do {
                    XXH3_consumeStripes(acc,
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, XXH3_INTERNALBUFFER_STRIPES,
                                        secret, state->secretLimit,
                                        f_acc512, f_scramble);
                    input += XXH3_INTERNALBUFFER_SIZE;
                } while (input<limit);
                /* buffer predecessor of last partial stripe */
                XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            }
        }

        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        memcpy(state->acc, acc, sizeof(acc));
#endif
    }

    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
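
/*
 * Editor's note, a streaming usage sketch (illustrative only): the canonical
 * sequence is create/reset, any number of updates, then digest. Since
 * XXH3_64bits_digest() works on a local copy of the accumulators, it can be
 * called mid-stream and updating can continue afterwards.
 *
 *   XXH3_state_t* st = XXH3_createState();
 *   if (st != NULL
 *    && XXH3_64bits_reset(st) == XXH_OK
 *    && XXH3_64bits_update(st, buf1, len1) == XXH_OK
 *    && XXH3_64bits_update(st, buf2, len2) == XXH_OK) {
 *       XXH64_hash_t h = XXH3_64bits_digest(st);
 *       (void)h;
 *   }
 *   XXH3_freeState(st);
 */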


XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}



/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that longer hashes are about as fast as the 64-bit version
 * due to it using only a slight modification of the 64-bit loop.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64  ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64   = XXH3_avalanche(h128.low64);
            h128.high64  = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64 = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}
5157 
5158 /*
5159  * A bit slower than XXH3_mix16B, but handles multiply by zero better.
5160  */
5161 XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc,const xxh_u8 * input_1,const xxh_u8 * input_2,const xxh_u8 * secret,XXH64_hash_t seed)5162 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
5163               const xxh_u8* secret, XXH64_hash_t seed)
5164 {
5165     acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
5166     acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
5167     acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
5168     acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
5169     return acc;
5170 }


XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        int const nbRounds = (int)len / 32;
        int i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        for (i=0; i<4; i++) {
            acc = XXH128_mix32B(acc,
                                input  + (32 * i),
                                input  + (32 * i) + 16,
                                secret + (32 * i),
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance to pass @secretLen (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 */
XXH3_WITH_SECRET_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                XXH64_hash_t seed64,
                                XXH3_f_accumulate_512 f_acc512,
                                XXH3_f_scrambleAcc f_scramble,
                                XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action must be taken when the `secret` conditions are not met,
     * this is the place to do it.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* ===   Public XXH128 API   === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
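
/*
 * Minimal one-shot usage sketch (illustrative only; `data` and the seed are
 * arbitrary, and defining XXH_INLINE_ALL is one documented way to pull this
 * implementation into a translation unit):
 *
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         const char data[] = "hello world";
 *         XXH128_hash_t const h = XXH128(data, sizeof(data)-1, 42);
 *         printf("%016llx%016llx\n",
 *                (unsigned long long)h.high64, (unsigned long long)h.low64);
 *         return 0;
 *     }
 */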


/* ===   XXH3 128-bit streaming   === */

/*
 * All initialization and update functions are identical to the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
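
/*
 * Streaming usage sketch (illustrative; `chunk1`/`chunk2` stand for any input
 * buffers, and error handling is trimmed for brevity: real code should check
 * the XXH3_createState() result and every XXH_errorcode return value):
 *
 *     XXH3_state_t* const state = XXH3_createState();
 *     XXH3_128bits_reset_withSeed(state, 42);
 *     XXH3_128bits_update(state, chunk1, chunk1Size);
 *     XXH3_128bits_update(state, chunk2, chunk2Size);
 *     {   XXH128_hash_t const h = XXH3_128bits_digest(state);
 *         // same result as hashing both chunks contiguously in one shot
 *     }
 *     XXH3_freeState(state);
 */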

/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}
/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2  */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
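
/*
 * Example: because XXH128_cmp() matches the comparator contract of stdlib's
 * qsort(), an array of hashes can be sorted directly (`hashes` and `nbHashes`
 * are hypothetical names for an application-side array and its length):
 *
 *     #include <stdlib.h>
 *     qsort(hashes, nbHashes, sizeof(XXH128_hash_t), XXH128_cmp);
 */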


/*======   Canonical representation   ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
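
/*
 * Round-trip sketch: the canonical form is a fixed big-endian byte layout,
 * suitable for storage or transmission, and converting back is lossless.
 * (`h` stands for any XXH128_hash_t value; assert() needs <assert.h>.)
 *
 *     XXH128_canonical_t canon;
 *     XXH128_canonicalFromHash(&canon, h);
 *     assert(XXH128_isEqual(XXH128_hashFromCanonical(&canon), h));
 */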



/* ==========================================
 * Secret generators
 * ==========================================
 */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

static void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
    XXH_ASSERT(secretBuffer != NULL);
    if (secretBuffer == NULL) return XXH_ERROR;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
    XXH_ASSERT(customSeed != NULL);
    if (customSeed == NULL) return XXH_ERROR;

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
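
/*
 * Usage sketch: derive a full-size secret from low-entropy seed material and
 * hash with it (`seedMaterial`, `seedMaterialSize`, `data` and `dataSize` are
 * hypothetical; the buffer must hold at least XXH3_SECRET_SIZE_MIN bytes):
 *
 *     unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *     if (XXH3_generateSecret(secret, sizeof(secret),
 *                             seedMaterial, seedMaterialSize) != XXH_OK)
 *         return XXH_ERROR;    // invalid arguments
 *     {   XXH128_hash_t const h =
 *             XXH3_128bits_withSecret(data, dataSize, secret, sizeof(secret));
 *         ...
 *     }
 */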

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}



/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
}
#endif