Qt
Internal/Contributor docs for the Qt SDK. <b>Note:</b> These are NOT official API docs; those are found <a href='https://doc.qt.io/'>here</a>.
Loading...
Searching...
No Matches
qhash.cpp
Go to the documentation of this file.
1// Copyright (C) 2020 The Qt Company Ltd.
2// Copyright (C) 2021 Intel Corporation.
3// Copyright (C) 2012 Giuseppe D'Angelo <dangelog@gmail.com>.
4// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
5
6// for rand_s, _CRT_RAND_S must be #defined before #including stdlib.h.
7// put it at the beginning so some indirect inclusion doesn't break it
8#ifndef _CRT_RAND_S
9#define _CRT_RAND_S
10#endif
11#include <stdlib.h>
12#include <stdint.h>
13
14#include "qhash.h"
15
16#ifdef truncate
17#undef truncate
18#endif
19
20#include <qbitarray.h>
21#include <qstring.h>
22#include <qglobal.h>
23#include <qbytearray.h>
24#include <qdatetime.h>
25#include <qbasicatomic.h>
26#include <qendian.h>
27#include <private/qrandom_p.h>
28#include <private/qsimd_p.h>
29
30#ifndef QT_BOOTSTRAPPED
31#include <qcoreapplication.h>
32#include <qrandom.h>
33#include <private/qlocale_tools_p.h>
34#endif // QT_BOOTSTRAPPED
35
36#include <array>
37#include <limits.h>
38
39#if defined(QT_NO_DEBUG) && !defined(NDEBUG)
40# define NDEBUG
41#endif
42#include <assert.h>
43
44#ifdef Q_CC_GNU
45# define Q_DECL_HOT_FUNCTION __attribute__((hot))
46#else
47# define Q_DECL_HOT_FUNCTION
48#endif
49
51
52void qt_from_latin1(char16_t *dst, const char *str, size_t size) noexcept; // qstring.cpp
53
54// We assume that pointers and size_t have the same size. If that assumption should fail
55// on a platform the code selecting the different methods below needs to be fixed.
56static_assert(sizeof(size_t) == QT_POINTER_SIZE, "size_t and pointers have different size.");
57
58namespace {
59struct HashSeedStorage
60{
61 static constexpr int SeedCount = 2;
62 QBasicAtomicInteger<quintptr> seeds[SeedCount] = { Q_BASIC_ATOMIC_INITIALIZER(0), Q_BASIC_ATOMIC_INITIALIZER(0) };
63
64#if !QT_SUPPORTS_INIT_PRIORITY || defined(QT_BOOTSTRAPPED)
65 constexpr HashSeedStorage() = default;
66#else
67 HashSeedStorage() { initialize(0); }
68#endif
69
70 enum State {
71 OverriddenByEnvironment = -1,
72 JustInitialized,
73 AlreadyInitialized
74 };
75 struct StateResult {
76 quintptr requestedSeed;
78 };
79
80 StateResult state(int which = -1);
81 Q_DECL_HOT_FUNCTION QHashSeed currentSeed(int which)
82 {
83 return { state(which).requestedSeed };
84 }
85
86 void resetSeed()
87 {
88#ifndef QT_BOOTSTRAPPED
89 if (state().state < AlreadyInitialized)
90 return;
91
92 // update the public seed
94 seeds[0].storeRelaxed(sizeof(size_t) > sizeof(quint32)
96#endif
97 }
98
99 void clearSeed()
100 {
101 state();
102 seeds[0].storeRelaxed(0); // always write (smaller code)
103 }
104
105private:
106 Q_DECL_COLD_FUNCTION Q_NEVER_INLINE StateResult initialize(int which) noexcept;
107};
108
// One-time (cold path) seed setup. A parseable QT_HASH_SEED environment
// variable forces the seed to zero (a non-zero requested value is ignored
// with a warning); otherwise both seed words are filled from
// qt_initial_random_value(). Deliberately avoids qWarning and
// qEnvironmentVariableIntValue, which are not reentrancy-safe here.
[[maybe_unused]] HashSeedStorage::StateResult HashSeedStorage::initialize(int which) noexcept
{
    StateResult result = { 0, OverriddenByEnvironment };
#ifdef QT_BOOTSTRAPPED
    Q_UNUSED(which);
    Q_UNREACHABLE_RETURN(result);
#else
    // can't use qEnvironmentVariableIntValue (reentrancy)
    const char *seedstr = getenv("QT_HASH_SEED");
    if (seedstr) {
        auto r = qstrntoll(seedstr, strlen(seedstr), 10);
        // only accept the override if the whole string parsed as an integer
        if (r.used > 0 && size_t(r.used) == strlen(seedstr)) {
            if (r.result) {
                // can't use qWarning here (reentrancy)
                fprintf(stderr, "QT_HASH_SEED: forced seed value is not 0; ignored.\n");
            }

            // we don't have to store to the seed, since it's pre-initialized by
            // the compiler to zero
            return result;
        }
    }

    // update the full seed
    auto x = qt_initial_random_value();
    for (int i = 0; i < SeedCount; ++i) {
        seeds[i].storeRelaxed(x.data[i]);
        if (which == i)
            result.requestedSeed = x.data[i];
    }
    result.state = JustInitialized;
    return result;
#endif
}
143
// Hot-path accessor. Ensures initialization has happened (via a function-local
// static when init priority is unavailable; the lambda's by-reference capture
// only matters on the very first call) and returns the current value of
// seeds[which] when which >= 0, or just the initialization state otherwise.
inline HashSeedStorage::StateResult HashSeedStorage::state(int which)
{
    // recognizable filler value; replaced below before it is ever returned
    constexpr quintptr BadSeed = quintptr(Q_UINT64_C(0x5555'5555'5555'5555));
    StateResult result = { BadSeed, AlreadyInitialized };

#if defined(QT_BOOTSTRAPPED)
    result = { 0, OverriddenByEnvironment };
#elif !QT_SUPPORTS_INIT_PRIORITY
    // dynamic initialization
    static auto once = [&]() {
        result = initialize(which);
        return true;
    }();
    Q_UNUSED(once);
#endif

    if (result.state == AlreadyInitialized && which >= 0)
        return { seeds[which].loadRelaxed(), AlreadyInitialized };
    return result;
}
164} // unnamed namespace
165
/*
    The QHash seed itself.
*/
#ifdef Q_DECL_INIT_PRIORITY
// Construct early (priority 05) so static initializers in user code can hash.
Q_DECL_INIT_PRIORITY(05)
#else
// No init-priority support: require constant initialization; the seed is then
// produced lazily by HashSeedStorage::state().
Q_CONSTINIT
#endif
static HashSeedStorage qt_qhash_seed;
175
176/*
177 * Hashing for memory segments is based on the public domain MurmurHash2 by
178 * Austin Appleby. See http://murmurhash.googlepages.com/
179 */
180#if QT_POINTER_SIZE == 4
// 32-bit MurmurHash2 (public domain, Austin Appleby), used for short keys on
// 32-bit platforms. The constants and the exact statement order define the
// hash value — do not reorder.
static inline uint murmurhash(const void *key, uint len, uint seed) noexcept
{
    // 'm' and 'r' are mixing constants generated offline.
    // They're not really 'magic', they just happen to work well.

    const unsigned int m = 0x5bd1e995;
    const int r = 24;

    // Initialize the hash to a 'random' value

    unsigned int h = seed ^ len;

    // Mix 4 bytes at a time into the hash

    const unsigned char *data = reinterpret_cast<const unsigned char *>(key);
    const unsigned char *end = data + (len & ~3);

    while (data != end) {
        // size_t is 4 bytes in this branch (guarded by QT_POINTER_SIZE == 4)
        size_t k;
        memcpy(&k, data, sizeof(uint));

        k *= m;
        k ^= k >> r;
        k *= m;

        h *= m;
        h ^= k;

        data += 4;
    }

    // Handle the last few bytes of the input array
    len &= 3;
    if (len) {
        unsigned int k = 0;
        end += len;

        while (data != end) {
            k <<= 8;
            k |= *data;
            ++data;
        }
        h ^= k;
        h *= m;
    }

    // Do a few final mixes of the hash to ensure the last few
    // bytes are well-incorporated.

    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;

    return h;
}
237
238#else
// 64-bit MurmurHash2 ("MurmurHash64A", public domain, Austin Appleby), used
// for keys no longer than a pointer. The constants and the exact statement
// order define the hash value — do not reorder.
static inline uint64_t murmurhash(const void *key, uint64_t len, uint64_t seed) noexcept
{
    const uint64_t m = 0xc6a4a7935bd1e995ULL;
    const int r = 47;

    uint64_t h = seed ^ (len * m);

    const unsigned char *data = reinterpret_cast<const unsigned char *>(key);
    // Fix: use an explicitly 64-bit mask. The previous `~7ul` is only 32 bits
    // wide on LLP64 targets (64-bit Windows), where it would also clear the
    // upper 32 bits of `len` for inputs >= 4 GiB.
    const unsigned char *end = data + (len & ~uint64_t(7));

    while (data != end) {
        uint64_t k;
        memcpy(&k, data, sizeof(uint64_t));

        k *= m;
        k ^= k >> r;
        k *= m;

        h ^= k;
        h *= m;

        data += 8;
    }

    len &= 7;
    if (len) {
        // handle the last few bytes of input, big-endian style
        size_t k = 0;
        end += len;

        while (data != end) {
            k <<= 8;
            k |= *data;
            ++data;
        }
        h ^= k;
        h *= m;
    }

    // final avalanche
    h ^= h >> r;
    h *= m;
    h ^= h >> r;

    return h;
}
285
286#endif
287
288namespace {
// This is an inlined version of the SipHash implementation that is
// trying to avoid some memcpy's from uint64 to uint8[] and back.

// Rotate-left; the width adapts to the (unsigned) type of x.
#define ROTL(x, b) (((x) << (b)) | ((x) >> (sizeof(x) * 8 - (b))))

// One SipHash mixing round over the four state words (64-bit rotation counts).
#define SIPROUND \
    do { \
        v0 += v1; \
        v1 = ROTL(v1, 13); \
        v1 ^= v0; \
        v0 = ROTL(v0, 32); \
        v2 += v3; \
        v3 = ROTL(v3, 16); \
        v3 ^= v2; \
        v0 += v3; \
        v3 = ROTL(v3, 21); \
        v3 ^= v0; \
        v2 += v1; \
        v1 = ROTL(v1, 17); \
        v1 ^= v2; \
        v2 = ROTL(v2, 32); \
    } while (0)
311
// SipHash state for 64-bit platforms; cROUNDS/dROUNDS are the compression and
// finalization round counts (i.e. SipHash-c-d). v0..v3 start from the
// constants of the SipHash specification.
template <int cROUNDS = 2, int dROUNDS = 4> struct SipHash64
{
    /* "somepseudorandomlygeneratedbytes" */
    uint64_t v0 = 0x736f6d6570736575ULL;
    uint64_t v1 = 0x646f72616e646f6dULL;
    uint64_t v2 = 0x6c7967656e657261ULL;
    uint64_t v3 = 0x7465646279746573ULL;
    uint64_t b;     // accumulates the final block (length byte + tail bytes)
    uint64_t k0;    // key/seed words
    uint64_t k1;

    inline SipHash64(uint64_t fulllen, uint64_t seed, uint64_t seed2);
    inline void addBlock(const uint8_t *in, size_t inlen);
    inline uint64_t finalize(const uint8_t *in, size_t left);
};
327
// Mixes the two seed words into the initial state and stashes the total
// length in the top byte of b, per the SipHash specification.
template <int cROUNDS, int dROUNDS>
SipHash64<cROUNDS, dROUNDS>::SipHash64(uint64_t inlen, uint64_t seed, uint64_t seed2)
{
    b = inlen << 56;
    k0 = seed;
    k1 = seed2;
    v3 ^= k1;
    v2 ^= k0;
    v1 ^= k1;
    v0 ^= k0;
}
339
// Compresses whole 8-byte blocks into the state; inlen must be a multiple
// of 8 (the sub-8-byte tail goes to finalize()).
template <int cROUNDS, int dROUNDS> Q_DECL_HOT_FUNCTION void
SipHash64<cROUNDS, dROUNDS>::addBlock(const uint8_t *in, size_t inlen)
{
    Q_ASSERT((inlen & 7ULL) == 0);
    int i;
    const uint8_t *end = in + inlen;
    for (; in != end; in += 8) {
        uint64_t m = qFromUnaligned<uint64_t>(in);
        v3 ^= m;

        for (i = 0; i < cROUNDS; ++i)
            SIPROUND;

        v0 ^= m;
    }
}
356
357template <int cROUNDS, int dROUNDS> Q_DECL_HOT_FUNCTION uint64_t
358SipHash64<cROUNDS, dROUNDS>::finalize(const uint8_t *in, size_t left)
359{
360 int i;
361 switch (left) {
362 case 7:
363 b |= ((uint64_t)in[6]) << 48;
365 case 6:
366 b |= ((uint64_t)in[5]) << 40;
368 case 5:
369 b |= ((uint64_t)in[4]) << 32;
371 case 4:
372 b |= ((uint64_t)in[3]) << 24;
374 case 3:
375 b |= ((uint64_t)in[2]) << 16;
377 case 2:
378 b |= ((uint64_t)in[1]) << 8;
380 case 1:
381 b |= ((uint64_t)in[0]);
382 break;
383 case 0:
384 break;
385 }
386
387 v3 ^= b;
388
389 for (i = 0; i < cROUNDS; ++i)
390 SIPROUND;
391
392 v0 ^= b;
393
394 v2 ^= 0xff;
395
396 for (i = 0; i < dROUNDS; ++i)
397 SIPROUND;
398
399 b = v0 ^ v1 ^ v2 ^ v3;
400 return b;
401}
402#undef SIPROUND
403
// This is a "SipHash" implementation adopted for 32bit platforms. It performs
// basically the same operations as the 64bit version using 4 byte at a time
// instead of 8.
//
// To make this work, we also need to change the constants for the mixing
// rotations in ROTL. We're simply using half of the 64bit constants, rounded up
// for odd numbers.
//
// For the v0-v4 constants, simply use the first four bytes of the 64 bit versions.
//

// One 32-bit SipHash mixing round (halved rotation counts, see note above).
#define SIPROUND \
    do { \
        v0 += v1; \
        v1 = ROTL(v1, 7); \
        v1 ^= v0; \
        v0 = ROTL(v0, 16); \
        v2 += v3; \
        v3 = ROTL(v3, 8); \
        v3 ^= v2; \
        v0 += v3; \
        v3 = ROTL(v3, 11); \
        v3 ^= v0; \
        v2 += v1; \
        v1 = ROTL(v1, 9); \
        v1 ^= v2; \
        v2 = ROTL(v2, 16); \
    } while (0)
432
// SipHash-like state for 32-bit platforms: same structure as SipHash64 but
// with 4-byte words and the truncated initialization constants (first four
// bytes of each 64-bit constant; see note above).
template <int cROUNDS = 2, int dROUNDS = 4> struct SipHash32
{
    /* "somepseudorandomlygeneratedbytes" */
    uint v0 = 0x736f6d65U;
    uint v1 = 0x646f7261U;
    uint v2 = 0x6c796765U;
    uint v3 = 0x74656462U;
    uint b;     // accumulates the final block (length byte + tail bytes)
    uint k0;    // key/seed words
    uint k1;

    inline SipHash32(size_t fulllen, uint seed, uint seed2);
    inline void addBlock(const uint8_t *in, size_t inlen);
    inline uint finalize(const uint8_t *in, size_t left);
};
448
449template <int cROUNDS, int dROUNDS> inline
450SipHash32<cROUNDS, dROUNDS>::SipHash32(size_t inlen, uint seed, uint seed2)
451{
452 uint k0 = seed;
453 uint k1 = seed2;
454 b = inlen << 24;
455 v3 ^= k1;
456 v2 ^= k0;
457 v1 ^= k1;
458 v0 ^= k0;
459}
460
// Compresses whole 4-byte blocks into the state; inlen must be a multiple
// of 4 (the sub-4-byte tail goes to finalize()).
template <int cROUNDS, int dROUNDS> inline Q_DECL_HOT_FUNCTION void
SipHash32<cROUNDS, dROUNDS>::addBlock(const uint8_t *in, size_t inlen)
{
    Q_ASSERT((inlen & 3ULL) == 0);
    int i;
    const uint8_t *end = in + inlen;
    for (; in != end; in += 4) {
        uint m = qFromUnaligned<uint>(in);
        v3 ^= m;

        for (i = 0; i < cROUNDS; ++i)
            SIPROUND;

        v0 ^= m;
    }
}
477
478template <int cROUNDS, int dROUNDS> inline Q_DECL_HOT_FUNCTION uint
479SipHash32<cROUNDS, dROUNDS>::finalize(const uint8_t *in, size_t left)
480{
481 int i;
482 switch (left) {
483 case 3:
484 b |= ((uint)in[2]) << 16;
486 case 2:
487 b |= ((uint)in[1]) << 8;
489 case 1:
490 b |= ((uint)in[0]);
491 break;
492 case 0:
493 break;
494 }
495
496 v3 ^= b;
497
498 for (i = 0; i < cROUNDS; ++i)
499 SIPROUND;
500
501 v0 ^= b;
502
503 v2 ^= 0xff;
504
505 for (i = 0; i < dROUNDS; ++i)
506 SIPROUND;
507
508 b = v0 ^ v1 ^ v2 ^ v3;
509 return b;
510}
511#undef SIPROUND
512#undef ROTL
513
// Use SipHash-1-2, which has similar performance characteristics as
// stablehash() above, instead of the SipHash-2-4 default.
// Selects the word size matching the platform pointer size.
template <int cROUNDS = 1, int dROUNDS = 2>
using SipHash = std::conditional_t<sizeof(void *) == 8,
        SipHash64<cROUNDS, dROUNDS>, SipHash32<cROUNDS, dROUNDS>>;
519} // unnamed namespace
520
// Hashes `inlen` bytes with SipHash-1-2: whole pointer-sized blocks first,
// then the remaining tail (inlen mod pointer size) through finalize().
static size_t siphash(const uint8_t *in, size_t inlen, size_t seed, size_t seed2)
{
    constexpr size_t TailSizeMask = sizeof(void *) - 1;
    SipHash<> hasher(inlen, seed, seed2);
    hasher.addBlock(in, inlen & ~TailSizeMask);
    return hasher.finalize(in + (inlen & ~TailSizeMask), inlen & TailSizeMask);
}
529
// How input bytes are (zero-)extended while hashing: None hashes them as-is;
// ByteToWord widens each byte to a 16-bit word, so a Latin-1 string hashes
// identically to its UTF-16 equivalent. (The enum header and the ByteToWord
// enumerator were dropped by the page extraction — both names are used by
// qHashBits_fallback and aeshash below — and are restored here.)
enum ZeroExtension {
    None = 0,
    ByteToWord = 1
};
534
535template <ZeroExtension = None> static size_t
536qHashBits_fallback(const uchar *p, size_t size, size_t seed, size_t seed2) noexcept;
537template <> size_t qHashBits_fallback<None>(const uchar *p, size_t size, size_t seed, size_t seed2) noexcept
538{
539 if (size <= QT_POINTER_SIZE)
540 return murmurhash(p, size, seed);
541
542 return siphash(reinterpret_cast<const uchar *>(p), size, seed, seed2);
543}
544
// Latin-1 ("ByteToWord") fallback: hashes `data` as if it had first been
// widened to UTF-16, so Latin-1 and UTF-16 spellings of the same string hash
// equally. Widens through a 256-char16_t stack buffer, feeding SipHash in
// chunks; all sizes passed to the hasher are byte counts of the widened data
// (hence the "* 2").
template <> size_t qHashBits_fallback<ByteToWord>(const uchar *data, size_t size, size_t seed, size_t seed2) noexcept
{
    auto quick_from_latin1 = [](char16_t *dest, const uchar *data, size_t size) {
        // Quick, "inlined" version for very short blocks
        std::copy_n(data, size, dest);
    };
    if (size <= QT_POINTER_SIZE / 2) {
        // widened data fits in a pointer-sized buffer: murmurhash it
        std::array<char16_t, QT_POINTER_SIZE / 2> buf;
        quick_from_latin1(buf.data(), data, size);
        return murmurhash(buf.data(), size * 2, seed);
    }

    constexpr size_t TailSizeMask = sizeof(void *) / 2 - 1;
    std::array<char16_t, 256> buf;
    SipHash<> siphash(size * 2, seed, seed2);
    ptrdiff_t offset = 0;
    // full buffer-sized chunks
    for ( ; offset + buf.size() < size; offset += buf.size()) {
        qt_from_latin1(buf.data(), reinterpret_cast<const char *>(data) + offset, buf.size());
        siphash.addBlock(reinterpret_cast<uint8_t *>(buf.data()), sizeof(buf));
    }
    // remaining whole blocks (multiple of the hasher's word size)
    if (size_t n = size - offset; n > TailSizeMask) {
        n &= ~TailSizeMask;
        qt_from_latin1(buf.data(), reinterpret_cast<const char *>(data) + offset, n);
        siphash.addBlock(reinterpret_cast<uint8_t *>(buf.data()), n * 2);
        offset += n;
    }

    // sub-word tail
    quick_from_latin1(buf.data(), data + offset, size - offset);
    return siphash.finalize(reinterpret_cast<uint8_t *>(buf.data()), (size - offset) * 2);
}
575
576#if defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__) // GCC
577# define QHASH_AES_SANITIZER_BUILD
578#elif __has_feature(address_sanitizer) || __has_feature(thread_sanitizer) // Clang
579# define QHASH_AES_SANITIZER_BUILD
580#endif
581
582// When built with a sanitizer, aeshash() is rightfully reported to have a
583// heap-buffer-overflow issue. However, we consider it to be safe in this
584// specific case and overcome the problem by correctly discarding the
585// out-of-range bits. To allow building the code with sanitizer,
586// QHASH_AES_SANITIZER_BUILD is used to disable aeshash() usage.
587#if QT_COMPILER_SUPPORTS_HERE(AES) && QT_COMPILER_SUPPORTS_HERE(SSE4_2) && \
588 !defined(QHASH_AES_SANITIZER_BUILD)
589# define AESHASH
590# define QT_FUNCTION_TARGET_STRING_AES_AVX2 "avx2,aes"
591# define QT_FUNCTION_TARGET_STRING_AES_AVX512 \
592 QT_FUNCTION_TARGET_STRING_ARCH_SKYLAKE_AVX512 "," \
593 QT_FUNCTION_TARGET_STRING_AES
594# define QT_FUNCTION_TARGET_STRING_VAES_AVX512 \
595 QT_FUNCTION_TARGET_STRING_ARCH_SKYLAKE_AVX512 "," \
596 QT_FUNCTION_TARGET_STRING_VAES
597# undef QHASH_AES_SANITIZER_BUILD
598# if QT_POINTER_SIZE == 8
599# define mm_set1_epz _mm_set1_epi64x
600# define mm_cvtsz_si128 _mm_cvtsi64_si128
601# define mm_cvtsi128_sz _mm_cvtsi128_si64
602# define mm256_set1_epz _mm256_set1_epi64x
603# else
604# define mm_set1_epz _mm_set1_epi32
605# define mm_cvtsz_si128 _mm_cvtsi32_si128
606# define mm_cvtsi128_sz _mm_cvtsi128_si32
607# define mm256_set1_epz _mm256_set1_epi32
608# endif
609
610namespace {
611 // This is inspired by the algorithm in the Go language. See:
612 // https://github.com/golang/go/blob/01b6cf09fc9f272d9db3d30b4c93982f4911d120/src/runtime/asm_amd64.s#L1105
613 // https://github.com/golang/go/blob/01b6cf09fc9f272d9db3d30b4c93982f4911d120/src/runtime/asm_386.s#L908
614 //
615 // Even though we're using the AESENC instruction from the CPU, this code
616 // is not encryption and this routine makes no claim to be
617 // cryptographically secure. We're simply using the instruction that performs
618 // the scrambling round (step 3 in [1]) because it's just very good at
619 // spreading the bits around.
620 //
621 // Note on Latin-1 hashing (ZX == ByteToWord): for simplicity of the
622 // algorithm, we pass sizes equivalent to the UTF-16 content (ZX == None).
623 // That means we must multiply by 2 on entry, divide by 2 on pointer
624 // advancing, and load half as much data from memory (though we produce
625 // exactly as much data in registers). The compilers appear to optimize
626 // this out.
627 //
628 // [1] https://en.wikipedia.org/wiki/Advanced_Encryption_Standard#High-level_description_of_the_algorithm
629
630 template <ZeroExtension ZX, typename T> static const T *advance(const T *ptr, ptrdiff_t n)
631 {
632 if constexpr (ZX == None)
633 return ptr + n;
634
635 // see note above on ZX == ByteToWord hashing
636 auto p = reinterpret_cast<const uchar *>(ptr);
637 n *= sizeof(T);
638 return reinterpret_cast<const T *>(p + n/2);
639 }
640
    // Unaligned 16-byte load. The ByteToWord variant reads only 8 source
    // bytes and zero-extends each to 16 bits, producing a full 128-bit value.
    template <ZeroExtension> static __m128i loadu128(const void *ptr);
    template <> Q_ALWAYS_INLINE QT_FUNCTION_TARGET(AES) __m128i loadu128<None>(const void *ptr)
    {
        return _mm_loadu_si128(reinterpret_cast<const __m128i *>(ptr));
    }
    template <> Q_ALWAYS_INLINE QT_FUNCTION_TARGET(AES) __m128i loadu128<ByteToWord>(const void *ptr)
    {
        // use a MOVQ followed by PMOVZXBW
        // the compiler usually combines them as a single, loading PMOVZXBW
        __m128i data = _mm_loadl_epi64(static_cast<const __m128i *>(ptr));
        return _mm_cvtepu8_epi16(data);
    }
653
654 // hash 16 bytes, running 3 scramble rounds of AES on itself (like label "final1")
656 hash16bytes(__m128i &state0, __m128i data)
657 {
658 state0 = _mm_xor_si128(state0, data);
659 state0 = _mm_aesenc_si128(state0, state0);
660 state0 = _mm_aesenc_si128(state0, state0);
661 state0 = _mm_aesenc_si128(state0, state0);
662 }
663
    // hash twice 16 bytes, running 2 scramble rounds of AES on itself
    // (two independent state streams, interleaved so the AESENC latencies of
    // the two lanes can overlap)
    template <ZeroExtension ZX>
    static void QT_FUNCTION_TARGET(AES) QT_VECTORCALL
    hash2x16bytes(__m128i &state0, __m128i &state1, const __m128i *src0, const __m128i *src1)
    {
        __m128i data0 = loadu128<ZX>(src0);
        __m128i data1 = loadu128<ZX>(src1);
        state0 = _mm_xor_si128(data0, state0);
        state1 = _mm_xor_si128(data1, state1);
        state0 = _mm_aesenc_si128(state0, state0);
        state1 = _mm_aesenc_si128(state1, state1);
        state0 = _mm_aesenc_si128(state0, state0);
        state1 = _mm_aesenc_si128(state1, state1);
    }
678
    // Pre-scrambled key material derived from the two seeds; state1()/
    // state0_256() derive the second lane / combined 256-bit state on demand.
    struct AESHashSeed
    {
        __m128i state0;
        __m128i mseed2;
        AESHashSeed(size_t seed, size_t seed2) QT_FUNCTION_TARGET(AES);
        __m128i state1() const QT_FUNCTION_TARGET(AES);
        __m256i state0_256() const QT_FUNCTION_TARGET(AES_AVX2)
        { return _mm256_set_m128i(state1(), state0); }
    };
689
// Builds the initial AES state from the per-call seed and the process-global
// secondary seed, then scrambles once. (The "len" slots mentioned below are
// filled with a copy of the low seed word.)
Q_ALWAYS_INLINE AESHashSeed::AESHashSeed(size_t seed, size_t seed2)
{
    __m128i mseed = mm_cvtsz_si128(seed);
    mseed2 = mm_set1_epz(seed2);

    // mseed (epi16) = [ seed, seed >> 16, seed >> 32, seed >> 48, len, 0, 0, 0 ]
    mseed = _mm_insert_epi16(mseed, short(seed), 4);
    // mseed (epi16) = [ seed, seed >> 16, seed >> 32, seed >> 48, len, len, len, len ]
    mseed = _mm_shufflehi_epi16(mseed, 0);

    // merge with the process-global seed
    __m128i key = _mm_xor_si128(mseed, mseed2);

    // scramble the key
    __m128i state0 = _mm_aesenc_si128(key, key);
    this->state0 = state0;
}
707
708Q_ALWAYS_INLINE __m128i AESHashSeed::state1() const
709{
710 {
711 // unlike the Go code, we don't have more per-process seed
712 __m128i state1 = _mm_aesenc_si128(state0, mseed2);
713 return state1;
714 }
715}
716
// Epilogue for inputs of 16..32 bytes (or the remainder after the main loop):
// hashes the leading block and a possibly-overlapping block ending at the
// data end, then folds the two lanes together. Overlapping reads are fine
// because the total length is already part of the key.
template <ZeroExtension ZX>
static size_t QT_FUNCTION_TARGET(AES) QT_VECTORCALL
aeshash128_16to32(__m128i state0, __m128i state1, const __m128i *src, const __m128i *srcend)
{
    {
        const __m128i *src2 = advance<ZX>(srcend, -1);
        if (advance<ZX>(src, 1) < srcend) {
            // epilogue: between 16 and 31 bytes
            hash2x16bytes<ZX>(state0, state1, src, src2);
        } else if (src != srcend) {
            // epilogue: between 1 and 16 bytes, overlap with the end
            __m128i data = loadu128<ZX>(src2);
            hash16bytes(state0, data);
        }

        // combine results:
        state0 = _mm_xor_si128(state0, state1);
    }

    return mm_cvtsi128_sz(state0);
}
738
// load all 16 bytes and mask off the bytes past the end of the source
// (indexed as maskarray + 15 - len, so the first `len` lanes are 0xff)
static const qint8 maskarray[] = {
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

// load 16 bytes ending at the data end, then shuffle them to the beginning
// (indexed as shufflecontrol + 15 - len; -1 lanes produce zero via PSHUFB)
static const qint8 shufflecontrol[] = {
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
750
// Hashes inputs shorter than 16 bytes. Performs a full 16-byte load and
// discards the out-of-range bytes; the cacheline test picks whichever of the
// two loads (forward + mask, or backward + shuffle) cannot cross into an
// unmapped page.
template <ZeroExtension ZX>
static size_t QT_FUNCTION_TARGET(AES) QT_VECTORCALL
aeshash128_lt16(__m128i state0, const __m128i *src, const __m128i *srcend, size_t len)
{
    if (len) {
        // We're going to load 16 bytes and mask zero the part we don't care
        // (the hash of a short string is different from the hash of a longer
        // including NULLs at the end because the length is in the key)
        // WARNING: this may produce valgrind warnings, but it's safe

        constexpr quintptr CachelineSize = 64;
        __m128i data;

        if ((quintptr(src) & (CachelineSize / 2)) == 0) {
            // lower half of the cacheline:
            __m128i mask = _mm_loadu_si128(reinterpret_cast<const __m128i *>(maskarray + 15 - len));
            data = loadu128<ZX>(src);
            data = _mm_and_si128(data, mask);
        } else {
            // upper half of the cacheline:
            __m128i control = _mm_loadu_si128(reinterpret_cast<const __m128i *>(shufflecontrol + 15 - len));
            data = loadu128<ZX>(advance<ZX>(srcend, -1));
            data = _mm_shuffle_epi8(data, control);
        }

        hash16bytes(state0, data);
    }
    return mm_cvtsi128_sz(state0);
}
780
781template <ZeroExtension ZX>
782static size_t QT_FUNCTION_TARGET(AES) QT_VECTORCALL
783aeshash128_ge32(__m128i state0, __m128i state1, const __m128i *src, const __m128i *srcend)
784{
785 // main loop: scramble two 16-byte blocks
786 for ( ; advance<ZX>(src, 2) < srcend; src = advance<ZX>(src, 2))
787 hash2x16bytes<ZX>(state0, state1, src, advance<ZX>(src, 1));
788
789 return aeshash128_16to32<ZX>(state0, state1, src, srcend);
790}
791
792# if QT_COMPILER_SUPPORTS_HERE(VAES)
793template <ZeroExtension> static __m256i loadu256(const void *ptr);
794template <> Q_ALWAYS_INLINE QT_FUNCTION_TARGET(VAES) __m256i loadu256<None>(const void *ptr)
795{
796 return _mm256_loadu_si256(reinterpret_cast<const __m256i *>(ptr));
797}
798template <> Q_ALWAYS_INLINE QT_FUNCTION_TARGET(VAES) __m256i loadu256<ByteToWord>(const void *ptr)
799{
800 // VPMOVZXBW xmm, ymm
801 __m128i data = _mm_loadu_si128(reinterpret_cast<const __m128i *>(ptr));
802 return _mm256_cvtepu8_epi16(data);
803}
804
// AVX-512VL path for inputs shorter than 32 bytes: a masked load reads
// exactly `len` bytes (no over-read, unlike the 128-bit short path), then
// either a 256-bit scramble (16..31 bytes) or the plain 16-byte scramble.
template <ZeroExtension ZX>
static size_t QT_FUNCTION_TARGET(VAES_AVX512) QT_VECTORCALL
aeshash256_lt32_avx256(__m256i state0, const uchar *p, size_t len)
{
    __m128i state0_128 = _mm256_castsi256_si128(state0);
    if (len) {
        __m256i data;
        if constexpr (ZX == None) {
            // mask with the low `len` bits set: load exactly len bytes
            __mmask32 mask = _bzhi_u32(-1, unsigned(len));
            data = _mm256_maskz_loadu_epi8(mask, p);
        } else {
            // len counts the widened (UTF-16) bytes; read half as many
            __mmask16 mask = _bzhi_u32(-1, unsigned(len) / 2);
            __m128i data0 = _mm_maskz_loadu_epi8(mask, p);
            data = _mm256_cvtepu8_epi16(data0);
        }
        __m128i data0 = _mm256_castsi256_si128(data);
        if (len >= sizeof(__m128i)) {
            state0 = _mm256_xor_si256(state0, data);
            state0 = _mm256_aesenc_epi128(state0, state0);
            state0 = _mm256_aesenc_epi128(state0, state0);
            // we're XOR'ing the two halves so we skip the third AESENC
            // state0 = _mm256_aesenc_epi128(state0, state0);

            // XOR the two halves and extract
            __m128i low = _mm256_extracti128_si256(state0, 0);
            __m128i high = _mm256_extracti128_si256(state0, 1);
            state0_128 = _mm_xor_si128(low, high);
        } else {
            hash16bytes(state0_128, data0);
        }
    }
    return mm_cvtsi128_sz(state0_128);
}
838
// VAES main path for inputs of 32 bytes or more: two 256-bit lanes consume
// two 32-byte blocks per iteration; the epilogue handles the remainder with
// a possibly-overlapping load, and the two 128-bit halves are folded into
// the final pointer-sized result.
template <ZeroExtension ZX>
static size_t QT_FUNCTION_TARGET(VAES) QT_VECTORCALL
aeshash256_ge32(__m256i state0, const __m128i *s, const __m128i *end, size_t len)
{
    // hash 32 bytes, running 3 scramble rounds of AES on itself
    static const auto hash32bytes = [](__m256i &state0, __m256i data) QT_FUNCTION_TARGET(VAES) {
        state0 = _mm256_xor_si256(state0, data);
        state0 = _mm256_aesenc_epi128(state0, state0);
        state0 = _mm256_aesenc_epi128(state0, state0);
        state0 = _mm256_aesenc_epi128(state0, state0);
    };

    // hash twice 32 bytes, running 2 scramble rounds of AES on itself
    const auto hash2x32bytes = [](__m256i &state0, __m256i &state1, const void *src0,
                                  const void *src1) QT_FUNCTION_TARGET(VAES) {
        __m256i data0 = loadu256<ZX>(src0);
        __m256i data1 = loadu256<ZX>(src1);
        state0 = _mm256_xor_si256(data0, state0);
        state1 = _mm256_xor_si256(data1, state1);
        state0 = _mm256_aesenc_epi128(state0, state0);
        state1 = _mm256_aesenc_epi128(state1, state1);
        state0 = _mm256_aesenc_epi128(state0, state0);
        state1 = _mm256_aesenc_epi128(state1, state1);
    };

    const __m256i *src = reinterpret_cast<const __m256i *>(s);
    const __m256i *srcend = reinterpret_cast<const __m256i *>(end);

    // derive the second lane from the first and the length
    __m256i state1 = _mm256_aesenc_epi128(state0, mm256_set1_epz(len));

    // main loop: scramble two 32-byte blocks
    for ( ; advance<ZX>(src, 2) < srcend; src = advance<ZX>(src, 2))
        hash2x32bytes(state0, state1, src, advance<ZX>(src, 1));

    const __m256i *src2 = advance<ZX>(srcend, -1);
    if (advance<ZX>(src, 1) < srcend) {
        // epilogue: between 32 and 63 bytes
        hash2x32bytes(state0, state1, src, src2);
    } else if (src != srcend) {
        // epilogue: between 1 and 32 bytes, overlap with the end
        __m256i data = loadu256<ZX>(src2);
        hash32bytes(state0, data);
    }

    // combine results:
    state0 = _mm256_xor_si256(state0, state1);

    // XOR the two halves and extract
    __m128i low = _mm256_extracti128_si256(state0, 0);
    __m128i high = _mm256_extracti128_si256(state0, 1);
    return mm_cvtsi128_sz(_mm_xor_si128(low, high));
}
890
891template <ZeroExtension ZX>
892static size_t QT_FUNCTION_TARGET(VAES)
893aeshash256(const uchar *p, size_t len, size_t seed, size_t seed2) noexcept
894{
895 AESHashSeed state(seed, seed2);
896 auto src = reinterpret_cast<const __m128i *>(p);
897 const auto srcend = reinterpret_cast<const __m128i *>(advance<ZX>(p, len));
898
899 if (len < sizeof(__m128i))
900 return aeshash128_lt16<ZX>(state.state0, src, srcend, len);
901
902 if (len <= sizeof(__m256i))
903 return aeshash128_16to32<ZX>(state.state0, state.state1(), src, srcend);
904
905 return aeshash256_ge32<ZX>(state.state0_256(), src, srcend, len);
906}
907
// VAES + AVX-512VL entry point: identical to aeshash256() except that all
// short inputs (< 32 bytes) go through the masked-load handler.
template <ZeroExtension ZX>
static size_t QT_FUNCTION_TARGET(VAES_AVX512)
aeshash256_avx256(const uchar *p, size_t len, size_t seed, size_t seed2) noexcept
{
    AESHashSeed state(seed, seed2);
    auto src = reinterpret_cast<const __m128i *>(p);
    const auto srcend = reinterpret_cast<const __m128i *>(advance<ZX>(p, len));

    if (len <= sizeof(__m256i))
        return aeshash256_lt32_avx256<ZX>(state.state0_256(), p, len);

    return aeshash256_ge32<ZX>(state.state0_256(), src, srcend, len);
}
921# endif // VAES
922
// Plain AES-NI entry point (no VAES): dispatch on length to the sub-16-byte
// tail handler, the 16..32-byte epilogue, or the two-lane main loop.
template <ZeroExtension ZX>
static size_t QT_FUNCTION_TARGET(AES)
aeshash128(const uchar *p, size_t len, size_t seed, size_t seed2) noexcept
{
    AESHashSeed state(seed, seed2);
    auto src = reinterpret_cast<const __m128i *>(p);
    const auto srcend = reinterpret_cast<const __m128i *>(advance<ZX>(p, len));

    if (len < sizeof(__m128i))
        return aeshash128_lt16<ZX>(state.state0, src, srcend, len);

    if (len <= sizeof(__m256i))
        return aeshash128_16to32<ZX>(state.state0, state.state1(), src, srcend);

    return aeshash128_ge32<ZX>(state.state0, state.state1(), src, srcend);
}
939
// Runtime dispatcher for the AES-based hash: picks the widest implementation
// the CPU supports (VAES+AVX512VL, VAES, or plain AES-NI).
template <ZeroExtension ZX = None>
static size_t aeshash(const uchar *p, size_t len, size_t seed, size_t seed2) noexcept
{
    if constexpr (ZX == ByteToWord)
        len *= 2; // see note above on ZX == ByteToWord hashing

# if QT_COMPILER_SUPPORTS_HERE(VAES)
    if (qCpuHasFeature(VAES)) {
        if (qCpuHasFeature(AVX512VL))
            return aeshash256_avx256<ZX>(p, len, seed, seed2);
        return aeshash256<ZX>(p, len, seed, seed2);
    }
# endif
    return aeshash128<ZX>(p, len, seed, seed2);
}
955#endif // x86 AESNI
956
957#if defined(Q_PROCESSOR_ARM) && QT_COMPILER_SUPPORTS_HERE(AES) && !defined(QHASH_AES_SANITIZER_BUILD) && !defined(QT_BOOTSTRAPPED)
959static size_t aeshash(const uchar *p, size_t len, size_t seed, size_t seed2) noexcept
960{
961 uint8x16_t key;
962# if QT_POINTER_SIZE == 8
963 uint64x2_t vseed = vcombine_u64(vcreate_u64(seed), vcreate_u64(seed2));
964 key = vreinterpretq_u8_u64(vseed);
965# else
966
967 uint32x2_t vseed = vmov_n_u32(seed);
968 vseed = vset_lane_u32(seed2, vseed, 1);
969 key = vreinterpretq_u8_u32(vcombine_u32(vseed, vseed));
970# endif
971
972 // Compared to x86 AES, ARM splits each round into two instructions
973 // and includes the pre-xor instead of the post-xor.
974 const auto hash16bytes = [](uint8x16_t &state0, uint8x16_t data) {
975 auto state1 = state0;
976 state0 = vaeseq_u8(state0, data);
977 state0 = vaesmcq_u8(state0);
978 auto state2 = state0;
979 state0 = vaeseq_u8(state0, state1);
980 state0 = vaesmcq_u8(state0);
981 auto state3 = state0;
982 state0 = vaeseq_u8(state0, state2);
983 state0 = vaesmcq_u8(state0);
984 state0 = veorq_u8(state0, state3);
985 };
986
987 uint8x16_t state0 = key;
988
989 if (len < 8)
990 goto lt8;
991 if (len < 16)
992 goto lt16;
993 if (len < 32)
994 goto lt32;
995
996 // rounds of 32 bytes
997 {
998 // Make state1 = ~state0:
999 uint8x16_t state1 = veorq_u8(state0, vdupq_n_u8(255));
1000
1001 // do simplified rounds of 32 bytes: unlike the Go code, we only
1002 // scramble twice and we keep 256 bits of state
1003 const auto *e = p + len - 31;
1004 while (p < e) {
1005 uint8x16_t data0 = vld1q_u8(p);
1006 uint8x16_t data1 = vld1q_u8(p + 16);
1007 auto oldstate0 = state0;
1008 auto oldstate1 = state1;
1009 state0 = vaeseq_u8(state0, data0);
1010 state1 = vaeseq_u8(state1, data1);
1011 state0 = vaesmcq_u8(state0);
1012 state1 = vaesmcq_u8(state1);
1013 auto laststate0 = state0;
1014 auto laststate1 = state1;
1015 state0 = vaeseq_u8(state0, oldstate0);
1016 state1 = vaeseq_u8(state1, oldstate1);
1017 state0 = vaesmcq_u8(state0);
1018 state1 = vaesmcq_u8(state1);
1019 state0 = veorq_u8(state0, laststate0);
1020 state1 = veorq_u8(state1, laststate1);
1021 p += 32;
1022 }
1023 state0 = veorq_u8(state0, state1);
1024 }
1025 len &= 0x1f;
1026
1027 // do we still have 16 or more bytes?
1028 if (len & 0x10) {
1029lt32:
1030 uint8x16_t data = vld1q_u8(p);
1031 hash16bytes(state0, data);
1032 p += 16;
1033 }
1034 len &= 0xf;
1035
1036 if (len & 0x08) {
1037lt16:
1038 uint8x8_t data8 = vld1_u8(p);
1039 uint8x16_t data = vcombine_u8(data8, vdup_n_u8(0));
1040 hash16bytes(state0, data);
1041 p += 8;
1042 }
1043 len &= 0x7;
1044
1045lt8:
1046 if (len) {
1047 // load the last chunk of data
1048 // We're going to load 8 bytes and mask zero the part we don't care
1049 // (the hash of a short string is different from the hash of a longer
1050 // including NULLs at the end because the length is in the key)
1051 // WARNING: this may produce valgrind warnings, but it's safe
1052
1053 uint8x8_t data8;
1054
1055 if (Q_LIKELY(quintptr(p + 8) & 0xff8)) {
1056 // same page, we definitely can't fault:
1057 // load all 8 bytes and mask off the bytes past the end of the source
1058 static const qint8 maskarray[] = {
1059 -1, -1, -1, -1, -1, -1, -1,
1060 0, 0, 0, 0, 0, 0, 0,
1061 };
1062 uint8x8_t mask = vld1_u8(reinterpret_cast<const quint8 *>(maskarray) + 7 - len);
1063 data8 = vld1_u8(p);
1064 data8 = vand_u8(data8, mask);
1065 } else {
1066 // too close to the end of the page, it could fault:
1067 // load 8 bytes ending at the data end, then shuffle them to the beginning
1068 static const qint8 shufflecontrol[] = {
1069 1, 2, 3, 4, 5, 6, 7,
1070 -1, -1, -1, -1, -1, -1, -1,
1071 };
1072 uint8x8_t control = vld1_u8(reinterpret_cast<const quint8 *>(shufflecontrol) + 7 - len);
1073 data8 = vld1_u8(p - 8 + len);
1074 data8 = vtbl1_u8(data8, control);
1075 }
1076 uint8x16_t data = vcombine_u8(data8, vdup_n_u8(0));
1077 hash16bytes(state0, data);
1078 }
1079
1080 // extract state0
1081# if QT_POINTER_SIZE == 8
1082 return vgetq_lane_u64(vreinterpretq_u64_u8(state0), 0);
1083# else
1084 return vgetq_lane_u32(vreinterpretq_u32_u8(state0), 0);
1085# endif
1086}
1087#endif
1088
// Seeded hash over an arbitrary range of \a size bytes at \a p.
//
// Dispatches to a hardware AES-based hash when the build enabled it and the
// CPU supports it at runtime; otherwise uses the portable fallback.
size_t qHashBits(const void *p, size_t size, size_t seed) noexcept
{
#ifdef QT_BOOTSTRAPPED
    // the seed is always 0 in bootstrapped mode (no seed generation code),
    // so help the compiler do dead code elimination
    seed = 0;
#endif
    // mix in the length as a secondary seed. For seed == 0, seed2 must be
    // size, to match what we used to do prior to Qt 6.2.
    size_t seed2 = size;
    if (seed)
        seed2 = qt_qhash_seed.currentSeed(1);

    auto data = reinterpret_cast<const uchar *>(p);
#ifdef AESHASH
    // x86 path: needs both AES-NI and SSE4.2 at runtime
    if (seed && qCpuHasFeature(AES) && qCpuHasFeature(SSE4_2))
        return aeshash(data, size, seed, seed2);
#elif defined(Q_PROCESSOR_ARM) && QT_COMPILER_SUPPORTS_HERE(AES) && !defined(QHASH_AES_SANITIZER_BUILD) && !defined(QT_BOOTSTRAPPED)
    // ARM path: needs the Crypto/AES extension at runtime
    if (seed && qCpuHasFeature(AES))
        return aeshash(data, size, seed, seed2);
#endif

    return qHashBits_fallback<>(data, size, seed, seed2);
}
1113
1114size_t qHash(QByteArrayView key, size_t seed) noexcept
1115{
1116 return qHashBits(key.constData(), size_t(key.size()), seed);
1117}
1118
1119size_t qHash(QStringView key, size_t seed) noexcept
1120{
1121 return qHashBits(key.data(), key.size()*sizeof(QChar), seed);
1122}
1123
1124#ifndef QT_BOOTSTRAPPED
// Hashes a QBitArray by hashing all but its last internal byte, then folding
// in the trailing partial byte (if any) by hand.
size_t qHash(const QBitArray &bitArray, size_t seed) noexcept
{
    // m = index of the last byte of the internal buffer (presumably the byte
    // holding the trailing partial bits — TODO confirm against QBitArray's
    // internal layout).
    qsizetype m = bitArray.d.size() - 1;
    size_t result = qHashBits(reinterpret_cast<const uchar *>(bitArray.d.constData()), size_t(qMax(0, m)), seed);

    // deal with the last 0 to 7 bits manually, because we can't trust that
    // the padding is initialized to 0 in bitArray.d
    // NOTE(review): the mask uses the full bit count n, not n & 0x7; for
    // n >= 31 the shift `1 << n` on an int looks like UB — confirm intended
    // mask width.
    qsizetype n = bitArray.size();
    if (n & 0x7)
        result = ((result << 4) + bitArray.d.at(m)) & ((1 << n) - 1);
    return result;
}
1137#endif
1138
1139size_t qHash(QLatin1StringView key, size_t seed) noexcept
1140{
1141#ifdef QT_BOOTSTRAPPED
1142 // the seed is always 0 in bootstrapped mode (no seed generation code),
1143 // so help the compiler do dead code elimination
1144 seed = 0;
1145#endif
1146
1147 auto data = reinterpret_cast<const uchar *>(key.data());
1148 size_t size = key.size();
1149
1150 // Mix in the length as a secondary seed.
1151 // Multiplied by 2 to match the byte size of the equiavlent UTF-16 string.
1152 size_t seed2 = size * 2;
1153 if (seed)
1154 seed2 = qt_qhash_seed.currentSeed(1);
1155
1156#if defined(AESHASH)
1157 if (seed && qCpuHasFeature(AES) && qCpuHasFeature(SSE4_2))
1158 return aeshash<ByteToWord>(data, size, seed, seed2);
1159#endif
1161}
1162
1217{
1218 return qt_qhash_seed.currentSeed(0);
1219}
1220
1231{
1232 qt_qhash_seed.clearSeed();
1233}
1234
1251{
1252 qt_qhash_seed.resetSeed();
1253}
1254
1255#if QT_DEPRECATED_SINCE(6,6)
1267int qGlobalQHashSeed()
1268{
1269 return int(QHashSeed::globalSeed() & INT_MAX);
1270}
1271
1296void qSetGlobalQHashSeed(int newSeed)
1297{
1298 if (Q_LIKELY(newSeed == 0 || newSeed == -1)) {
1299 if (newSeed == 0)
1301 else
1303 } else {
1304 // can't use qWarning here (reentrancy)
1305 fprintf(stderr, "qSetGlobalQHashSeed: forced seed value is not 0; ignoring call\n");
1306 }
1307}
1308#endif // QT_DEPRECATED_SINCE(6,6)
1309
1325uint qt_hash(QStringView key, uint chained) noexcept
1326{
1327 auto n = key.size();
1328 auto p = key.utf16();
1329
1330 uint h = chained;
1331
1332 while (n--) {
1333 h = (h << 4) + *p++;
1334 h ^= (h & 0xf0000000) >> 23;
1335 h &= 0x0fffffff;
1336 }
1337 return h;
1338}
1339
1625size_t qHash(double key, size_t seed) noexcept
1626{
1627 // ensure -0 gets mapped to 0
1628 key += 0.0;
1629 if constexpr (sizeof(double) == sizeof(size_t)) {
1630 size_t k;
1631 memcpy(&k, &key, sizeof(double));
1632 return QHashPrivate::hash(k, seed);
1633 } else {
1634 return murmurhash(&key, sizeof(key), seed);
1635 }
1636}
1637
1643size_t qHash(long double key, size_t seed) noexcept
1644{
1645 // ensure -0 gets mapped to 0
1646 key += static_cast<long double>(0.0);
1647 if constexpr (sizeof(long double) == sizeof(size_t)) {
1648 size_t k;
1649 memcpy(&k, &key, sizeof(long double));
1650 return QHashPrivate::hash(k, seed);
1651 } else {
1652 return murmurhash(&key, sizeof(key), seed);
1653 }
1654}
1655
2243
2247
// Compile-time sanity checks for the QHash span/growth machinery. These only
// run when the bit operations used (popcount etc.) are constexpr-capable.
#ifdef QT_HAS_CONSTEXPR_BITOPS
namespace QHashPrivate {
static_assert(qPopulationCount(SpanConstants::NEntries) == 1,
              "NEntries must be a power of 2 for bucketForHash() to work.");

// ensure the size of a Span does not depend on the template parameters
using Node1 = Node<int, int>;
static_assert(sizeof(Span<Node1>) == sizeof(Span<Node<char, void *>>));
static_assert(sizeof(Span<Node1>) == sizeof(Span<Node<qsizetype, QHashDummyValue>>));
static_assert(sizeof(Span<Node1>) == sizeof(Span<Node<QString, QVariant>>));
static_assert(sizeof(Span<Node1>) > SpanConstants::NEntries);
static_assert(qNextPowerOfTwo(sizeof(Span<Node1>)) == SpanConstants::NEntries * 2);

// ensure allocations are always a power of two, at a minimum NEntries,
// obeying the formula
//      qNextPowerOfTwo(2 * N);
// without overflowing
static constexpr size_t NEntries = SpanConstants::NEntries;
static_assert(GrowthPolicy::bucketsForCapacity(1) == NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries / 2 + 0) == NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries / 2 + 1) == 2 * NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries * 1 - 1) == 2 * NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries * 1 + 0) == 4 * NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries * 1 + 1) == 4 * NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries * 2 - 1) == 4 * NEntries);
static_assert(GrowthPolicy::bucketsForCapacity(NEntries * 2 + 0) == 8 * NEntries);
// saturation near the top of the size_t range
static_assert(GrowthPolicy::bucketsForCapacity(SIZE_MAX / 4) == SIZE_MAX / 2 + 1);
static_assert(GrowthPolicy::bucketsForCapacity(SIZE_MAX / 2) == SIZE_MAX);
static_assert(GrowthPolicy::bucketsForCapacity(SIZE_MAX) == SIZE_MAX);
}
#endif
4192
T loadRelaxed() const noexcept
\inmodule QtCore
Definition qbitarray.h:13
\inmodule QtCore
size_t qHash(double key, size_t seed) noexcept
Definition qhash.cpp:1625
size_t qHash(long double key, size_t seed) noexcept
Definition qhash.cpp:1643
\inmodule QtCore \reentrant
Definition qrandom.h:21
static Q_DECL_CONST_FUNCTION QRandomGenerator * system()
\threadsafe
Definition qrandom.h:270
quint32 generate()
Generates a 32-bit random quantity and returns it.
Definition qrandom.h:48
quint64 generate64()
Generates a 64-bit random quantity and returns it.
Definition qrandom.h:53
\inmodule QtCore
Definition qstringview.h:78
QString str
[2]
else opt state
[0]
Q_DECL_CONST_FUNCTION constexpr size_t hash(size_t key, size_t seed) noexcept
Combined button and popup list for selecting options.
Q_DECL_CONST_FUNCTION QT_POPCOUNT_CONSTEXPR uint qPopulationCount(quint32 v) noexcept
#define Q_BASIC_ATOMIC_INITIALIZER(a)
#define Q_FALLTHROUGH()
#define Q_NEVER_INLINE
#define Q_LIKELY(x)
#define Q_DECL_COLD_FUNCTION
#define Q_ALWAYS_INLINE
static bool initialize()
Definition qctf.cpp:94
size_t qHash(const QFileSystemWatcherPathKey &key, size_t seed=0)
size_t qHashBits(const void *p, size_t size, size_t seed) noexcept
Definition qhash.cpp:1089
size_t qHashBits_fallback< None >(const uchar *p, size_t size, size_t seed, size_t seed2) noexcept
Definition qhash.cpp:537
Q_NEVER_INLINE static Q_DECL_HOT_FUNCTION uint64_t murmurhash(const void *key, uint64_t len, uint64_t seed) noexcept
Definition qhash.cpp:240
static Q_CONSTINIT HashSeedStorage qt_qhash_seed
Definition qhash.cpp:174
QT_BEGIN_NAMESPACE void qt_from_latin1(char16_t *dst, const char *str, size_t size) noexcept
Definition qstring.cpp:920
Q_NEVER_INLINE static Q_DECL_HOT_FUNCTION size_t siphash(const uint8_t *in, size_t inlen, size_t seed, size_t seed2)
Definition qhash.cpp:522
size_t qHashBits_fallback< ByteToWord >(const uchar *data, size_t size, size_t seed, size_t seed2) noexcept
Definition qhash.cpp:545
static size_t qHashBits_fallback(const uchar *p, size_t size, size_t seed, size_t seed2) noexcept
ZeroExtension
Definition qhash.cpp:530
@ ByteToWord
Definition qhash.cpp:532
@ None
Definition qhash.cpp:531
#define SIPROUND
Definition qhash.cpp:294
#define Q_DECL_HOT_FUNCTION
Definition qhash.cpp:47
uint qt_hash(QStringView key, uint chained) noexcept
Definition qhash.cpp:1325
QSimpleParsedNumber< qlonglong > qstrntoll(const char *begin, qsizetype size, int base)
constexpr quint32 qNextPowerOfTwo(quint32 v)
Definition qmath.h:335
static ControlElement< T > * ptr(QWidget *widget)
constexpr const T & qMax(const T &a, const T &b)
Definition qminmax.h:42
GLint GLfloat GLfloat GLfloat v2
GLboolean GLboolean GLboolean b
GLint GLint GLint GLint GLint x
[0]
const GLfloat * m
GLuint64 key
GLenum GLuint GLintptr GLsizeiptr size
[1]
GLboolean r
[2]
GLuint GLuint end
GLint GLsizei GLsizei GLenum GLenum GLsizei void * data
GLenum src
GLint left
GLenum GLenum dst
GLint GLfloat v0
GLenum GLuint GLenum GLsizei const GLchar * buf
GLint GLfloat GLfloat v1
GLenum GLuint GLintptr offset
GLint GLfloat GLfloat GLfloat GLfloat v3
GLint GLint GLint GLint GLint GLint GLint GLbitfield mask
GLfloat n
GLfloat GLfloat GLfloat GLfloat h
GLdouble s
[6]
Definition qopenglext.h:235
GLuint in
GLuint64EXT * result
[6]
GLfloat GLfloat p
[1]
GLenum GLsizei len
#define QT_POINTER_SIZE
static Q_CONSTINIT QBasicAtomicInteger< unsigned > seed
Definition qrandom.cpp:196
#define Q_ASSERT(cond)
Definition qrandom.cpp:47
QRandomGenerator::InitialRandomData qt_initial_random_value() noexcept
Definition qrandom.cpp:1288
#define QT_VECTORCALL
Definition qsimd.h:128
#define qCpuHasFeature(feature)
Definition qsimd_p.h:387
#define QT_FUNCTION_TARGET(x)
Definition qsimd_p.h:133
Q_CORE_EXPORT void qt_from_latin1(char16_t *dst, const char *str, size_t size) noexcept
Definition qstring.cpp:920
#define k0
#define k1
#define Q_UNUSED(x)
#define Q_UINT64_C(c)
Definition qtypes.h:58
unsigned int quint32
Definition qtypes.h:50
unsigned char uchar
Definition qtypes.h:32
size_t quintptr
Definition qtypes.h:167
ptrdiff_t qsizetype
Definition qtypes.h:165
unsigned int uint
Definition qtypes.h:34
QT_BEGIN_NAMESPACE typedef signed char qint8
Definition qtypes.h:45
unsigned char quint8
Definition qtypes.h:46
QRandomGenerator generator(sseq)
\inmodule QtCore
static Q_CORE_EXPORT void setDeterministicGlobalSeed()
\threadsafe
Definition qhash.cpp:1230
static Q_CORE_EXPORT void resetRandomGlobalSeed()
\threadsafe
Definition qhash.cpp:1250
static Q_CORE_EXPORT QHashSeed globalSeed() noexcept
\threadsafe
Definition qhash.cpp:1216