1 /*
2 +----------------------------------------------------------------------+
3 | Zend Engine |
4 +----------------------------------------------------------------------+
5 | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 2.00 of the Zend license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.zend.com/license/2_00.txt. |
11 | If you did not receive a copy of the Zend license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@zend.com so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
15 | Authors: Andi Gutmans <andi@php.net> |
16 | Zeev Suraski <zeev@php.net> |
17 | Dmitry Stogov <dmitry@php.net> |
18 +----------------------------------------------------------------------+
19 */
20
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24
25 #if defined(__aarch64__) || defined(_M_ARM64)
26 # include <arm_neon.h>
27 #endif
28
29 /* Prefer to use AVX2 instructions for better latency and throughput */
30 #if defined(__AVX2__)
31 # include <immintrin.h>
32 #elif defined( __SSE2__)
33 # include <mmintrin.h>
34 # include <emmintrin.h>
35 #endif
36
#if ZEND_DEBUG
/* Debug-only assertion; a per-table HASH_FLAG_ALLOW_COW_VIOLATION escape
 * hatch lets tests/tools deliberately violate copy-on-write rules. */
# define HT_ASSERT(ht, expr) \
	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
#else
# define HT_ASSERT(ht, expr)
#endif

/* Assert the table is not shared (refcount 1), i.e. safe to mutate in place. */
#define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)

/* Sentinel stored into iterators whose backing table has been destroyed. */
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
47
#if ZEND_DEBUG

/* Consistency states kept in the HASH_FLAG_CONSISTENCY bits of HT_FLAGS
 * (debug builds only). */
#define HT_OK					0x00
#define HT_IS_DESTROYING		0x01
#define HT_DESTROYED			0x02
#define HT_CLEANING				0x03

/* Abort with a diagnostic when ht is accessed while in an invalid state
 * (being destroyed / already destroyed / being cleaned). No-op when the
 * table is consistent (HT_OK). */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
		return;
	}
	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
		case HT_IS_DESTROYING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
			break;
		case HT_DESTROYED:
			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
			break;
		case HT_CLEANING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
			break;
		default:
			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
			break;
	}
	ZEND_UNREACHABLE();
}
/* NOTE: the trailing ';' is intentional — IS_CONSISTENT(a) is used as a
 * complete statement in debug builds and expands to nothing otherwise. */
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
#define SET_INCONSISTENT(n) do { \
		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
	} while (0)
#else
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif
84
/* Grow (or compact) the table when every slot — live entries plus
 * tombstones — is in use (nNumUsed, not nNumOfElements, is checked). */
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
	if ((ht)->nNumUsed >= (ht)->nTableSize) { \
		zend_hash_do_resize(ht); \
	}
89
zend_hash_str_find_ptr_lc(const HashTable * ht,const char * str,size_t len)90 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
91 void *result;
92 char *lc_str;
93
94 /* Stack allocate small strings to improve performance */
95 ALLOCA_FLAG(use_heap)
96
97 lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
98 result = zend_hash_str_find_ptr(ht, lc_str, len);
99 free_alloca(lc_str, use_heap);
100
101 return result;
102 }
103
zend_hash_find_ptr_lc(const HashTable * ht,zend_string * key)104 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
105 void *result;
106 zend_string *lc_key = zend_string_tolower(key);
107 result = zend_hash_find_ptr(ht, lc_key);
108 zend_string_release(lc_key);
109 return result;
110 }
111
112 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
113
/* Round nSize up to a power of two within [HT_MIN_SIZE, HT_MAX_SIZE].
 * Raises a fatal E_ERROR when the requested size exceeds HT_MAX_SIZE. */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#ifdef ZEND_WIN32
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize > HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#ifdef ZEND_WIN32
	if (BitScanReverse(&index, nSize - 1)) {
		/* (31 - index) ^ 0x1f == index for 0 <= index <= 31, so this is
		 * 2 << (position of highest set bit), the next power of two. */
		return 0x2u << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bit scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
	/* clz(x) ^ 0x1f == 31 - clz(x) == index of the highest set bit. */
	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: smear the highest set bit downwards, then add one
	 * to obtain the next power of two. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
148
/* Allocate backing storage for a packed (vector-like) table of
 * ht->nTableSize slots and mark it packed with static keys.
 * Persistent arrays use the persistent allocator. */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
	void *data;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
		/* Use specialized API with constant allocation amount for a particularly common case. */
		data = emalloc(HT_PACKED_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
	} else {
		data = emalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
	}
	HT_SET_DATA_ADDR(ht, data);
	/* Don't overwrite iterator count. */
	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET_PACKED(ht);
}
166
/* Allocate backing storage for a mixed (hashed) table of nSize slots.
 * The common HT_MIN_SIZE case is specialized: the 16 hash slots are filled
 * with HT_INVALID_IDX (all-ones) via vectorized stores where available. */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
	void *data;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
		HT_SET_DATA_ADDR(ht, data);
		/* Don't overwrite iterator count. */
		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
#if defined(__AVX2__)
		/* Fill 16 x uint32_t hash slots with -1 using two 256-bit stores.
		 * cmpeq(x, x) yields all-ones without loading a constant. */
		do {
			__m256i ymm0 = _mm256_setzero_si256();
			ymm0 = _mm256_cmpeq_epi64(ymm0, ymm0);
			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data, 0), ymm0);
			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data, 8), ymm0);
		} while(0);
#elif defined (__SSE2__)
		/* Same all-ones fill with four 128-bit stores. */
		do {
			__m128i xmm0 = _mm_setzero_si128();
			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 0), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 4), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 8), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
		} while (0);
#elif defined(__aarch64__) || defined(_M_ARM64)
		/* NEON: four 128-bit stores of the splatted -1 vector. */
		do {
			int32x4_t t = vdupq_n_s32(-1);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 0), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 4), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 8), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
		} while (0);
#else
		/* Scalar fallback: one store per hash slot. */
		HT_HASH_EX(data, 0) = -1;
		HT_HASH_EX(data, 1) = -1;
		HT_HASH_EX(data, 2) = -1;
		HT_HASH_EX(data, 3) = -1;
		HT_HASH_EX(data, 4) = -1;
		HT_HASH_EX(data, 5) = -1;
		HT_HASH_EX(data, 6) = -1;
		HT_HASH_EX(data, 7) = -1;
		HT_HASH_EX(data, 8) = -1;
		HT_HASH_EX(data, 9) = -1;
		HT_HASH_EX(data, 10) = -1;
		HT_HASH_EX(data, 11) = -1;
		HT_HASH_EX(data, 12) = -1;
		HT_HASH_EX(data, 13) = -1;
		HT_HASH_EX(data, 14) = -1;
		HT_HASH_EX(data, 15) = -1;
#endif
		return;
	} else {
		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
	}
	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
	HT_SET_DATA_ADDR(ht, data);
	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET(ht);
}
233
zend_hash_real_init_ex(HashTable * ht,bool packed)234 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
235 {
236 HT_ASSERT_RC1(ht);
237 ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
238 if (packed) {
239 zend_hash_real_init_packed_ex(ht);
240 } else {
241 zend_hash_real_init_mixed_ex(ht);
242 }
243 }
244
/* Shared hash area for uninitialized tables: HT_MIN_MASK (-2) hash slots,
 * all set to HT_INVALID_IDX, and no bucket storage. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};

/* Process-wide immutable empty array, shared wherever an empty array value
 * is needed without allocating. refcount 2 + GC_IMMUTABLE keep it from ever
 * being freed; arData points just past the shared uninitialized slots. */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_UNINITIALIZED,
	.nTableMask = HT_MIN_MASK,
	{.arData = (Bucket*)&uninitialized_bucket[2]},
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = ZEND_LONG_MIN,
	.pDestructor = ZVAL_PTR_DTOR
};
261
/* Initialize the HashTable header only — no bucket storage is allocated
 * (lazy init; the shared uninitialized_bucket stands in until first use). */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	/* ZEND_LONG_MIN marks "no numeric key inserted yet". */
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->pDestructor = pDestructor;
	/* Remember the rounded-up size; actual allocation happens lazily. */
	ht->nTableSize = zend_hash_check_size(nSize);
}
276
/* Public entry point: thin wrapper around the inlined initializer. */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
281
_zend_new_array_0(void)282 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
283 {
284 HashTable *ht = emalloc(sizeof(HashTable));
285 _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
286 return ht;
287 }
288
_zend_new_array(uint32_t nSize)289 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
290 {
291 HashTable *ht = emalloc(sizeof(HashTable));
292 _zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
293 return ht;
294 }
295
zend_new_pair(zval * val1,zval * val2)296 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
297 {
298 zval *zv;
299 HashTable *ht = emalloc(sizeof(HashTable));
300 _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
301 ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
302 zend_hash_real_init_packed_ex(ht);
303
304 zv = ht->arPacked;
305 ZVAL_COPY_VALUE(zv, val1);
306 zv++;
307 ZVAL_COPY_VALUE(zv, val2);
308 return ht;
309 }
310
/* Double a packed table's capacity in place (realloc keeps live slots). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
	HT_ASSERT_RC1(ht);
	if (ht->nTableSize >= HT_MAX_SIZE) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
	}
	uint32_t newTableSize = ht->nTableSize * 2;
	/* perealloc2 copies only the used portion (HT_PACKED_USED_SIZE). */
	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
	ht->nTableSize = newTableSize;
}
321
/* Public wrapper: allocate storage in packed or mixed representation. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
329
/* Public wrapper: allocate storage in the packed representation. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
337
/* Public wrapper: allocate storage in the mixed (hashed) representation. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
345
/* Convert a packed array to the mixed representation: each packed slot
 * becomes a bucket whose numeric key is its former index, then the hash
 * slots are rebuilt by rehashing. */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	zval *src = ht->arPacked;
	Bucket *dst;
	uint32_t i;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	HT_ASSERT_RC1(ht);
	// Alloc before assign to avoid inconsistencies on OOM
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	dst = ht->arData;
	for (i = 0; i < ht->nNumUsed; i++) {
		ZVAL_COPY_VALUE(&dst->val, src);
		/* Numeric key == original packed index; key == NULL marks numeric. */
		dst->h = i;
		dst->key = NULL;
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	zend_hash_rehash(ht);
}
373
/* Convert a mixed table to the packed representation. Caller must ensure
 * the table is packable (dense ascending numeric keys); only values are
 * carried over — bucket keys/hashes are dropped. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *src = ht->arData;
	zval *dst;
	uint32_t i;

	HT_ASSERT_RC1(ht);
	new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, new_data);
	HT_HASH_RESET_PACKED(ht);
	dst = ht->arPacked;
	for (i = 0; i < ht->nNumUsed; i++) {
		ZVAL_COPY_VALUE(dst, &src->val);
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
395
/* Pre-size ht for at least nSize elements in the requested representation.
 * No-op when nSize is 0 or does not exceed the current capacity. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
{
	HT_ASSERT_RC1(ht);

	if (nSize == 0) return;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* Not yet allocated: just record the size and allocate. */
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			ZEND_ASSERT(HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Packed data can grow in place via realloc. */
				uint32_t newTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
				ht->nTableSize = newTableSize;
			}
		} else {
			ZEND_ASSERT(!HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Mixed data needs a fresh block (hash area precedes the
				 * buckets), a bucket copy, and a full rehash. */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableSize = nSize;
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
434
/* Roll back a mixed table to nNumUsed entries: walk the to-be-discarded
 * buckets from the top down, unlinking each live one from the head of its
 * collision chain. Values are NOT destroyed here. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	while (p != end) {
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
#if 0
		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
			abort();
		}
#endif
		/* Because chains point downward, p is the chain head: popping it is
		 * just storing its next pointer back into the hash slot. */
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
459
/* Count live elements, excluding INDIRECT slots whose target is UNDEF
 * (e.g. unset entries in a symbol table). */
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
	zval *val;
	uint32_t num = ht->nNumOfElements;

	ZEND_HASH_MAP_FOREACH_VAL(ht, val) {
		if (Z_TYPE_P(val) == IS_INDIRECT) {
			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
				num--;
			}
		}
	} ZEND_HASH_FOREACH_END();
	return num;
}
474 /* }}} */
475
/* Element count as observed by userland count(): recalculates when empty
 * INDIRECT slots may exist (HAS_EMPTY_IND flag, or the global symbol table
 * which may contain them without the flag being set). */
ZEND_API uint32_t zend_array_count(HashTable *ht)
{
	uint32_t num;
	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
		num = zend_array_recalc_elements(ht);
		/* No empty indirections left: clear the flag so the next call is O(1). */
		if (UNEXPECTED(ht->nNumOfElements == num)) {
			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
		}
	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
		num = zend_array_recalc_elements(ht);
	} else {
		num = zend_hash_num_elements(ht);
	}
	return num;
}
491 /* }}} */
492
_zend_hash_get_valid_pos(const HashTable * ht,HashPosition pos)493 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
494 {
495 if (HT_IS_PACKED(ht)) {
496 while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arPacked[pos])) {
497 pos++;
498 }
499 } else {
500 while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
501 pos++;
502 }
503 }
504 return pos;
505 }
506
/* Internal pointer position, skipping any UNDEF holes it may sit on. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}
511
/* Public wrapper around _zend_hash_get_current_pos(). */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}
516
/* Delete all iterators in idx's circular next_copy ring except idx itself,
 * then make idx a singleton ring again. */
static void zend_hash_remove_iterator_copies(uint32_t idx) {
	HashTableIterator *iterators = EG(ht_iterators);

	HashTableIterator *iter = iterators + idx;
	uint32_t next_idx = iter->next_copy;
	while (next_idx != idx) {
		uint32_t cur_idx = next_idx;
		HashTableIterator *cur_iter = iterators + cur_idx;
		next_idx = cur_iter->next_copy;
		/* Make the copy a singleton first so zend_hash_iterator_del() does
		 * not re-enter this function for it. */
		cur_iter->next_copy = cur_idx; // avoid recursion in zend_hash_iterator_del
		zend_hash_iterator_del(cur_idx);
	}
	iter->next_copy = idx;
}
531
/* Register an iterator over ht at position pos and return its slot index.
 * Reuses a free slot when available; otherwise grows the iterator array by
 * 8 slots (migrating off the fixed EG(ht_iterators_slots) buffer on first
 * growth). */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* Per-table iterator count saturates; don't bump once overflowed. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	while (iter != end) {
		if (iter->ht == NULL) {
			/* Free slot found. */
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			/* A fresh iterator is a singleton copy-ring (points to itself). */
			iter->next_copy = idx;
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* All slots busy: grow by 8. The initial array is embedded in EG and
	 * must be copied out rather than realloc'ed. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	/* Zero the remaining 7 new slots so they read as free (ht == NULL). */
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
	idx = iter - EG(ht_iterators);
	iter->next_copy = idx;
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
570
// To avoid losing track of the HashTable when separating arrays, we track all copies at once.
/* Search idx's copy-ring for an iterator bound to ht. On success, adopt that
 * copy's table/position into iterator idx, drop the other copies, and return
 * true. Otherwise drop all copies and return false. */
static zend_always_inline bool zend_hash_iterator_find_copy_pos(uint32_t idx, HashTable *ht) {
	HashTableIterator *iter = EG(ht_iterators) + idx;

	uint32_t next_idx = iter->next_copy;
	if (EXPECTED(next_idx != idx)) {
		HashTableIterator *copy_iter;
		while (next_idx != idx) {
			copy_iter = EG(ht_iterators) + next_idx;
			if (copy_iter->ht == ht) {
				// We have found the hashtable we are actually iterating over
				// Now clean any intermediate copies and replace the original index by the found one
				/* Move the per-table iterator count from the old table to ht,
				 * skipping poisoned/overflowed tables. */
				if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
						&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
					HT_DEC_ITERATORS_COUNT(iter->ht);
				}
				if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
					HT_INC_ITERATORS_COUNT(ht);
				}
				iter->ht = copy_iter->ht;
				iter->pos = copy_iter->pos;
				zend_hash_remove_iterator_copies(idx);
				return true;
			}
			next_idx = copy_iter->next_copy;
		}
		/* ht not among the copies: discard them all. */
		zend_hash_remove_iterator_copies(idx);
	}

	return false;
}
602
/* Current position of iterator idx over ht. If the iterator is bound to a
 * different table (and no tracked copy matches), rebind it to ht and reset
 * its position to the table's current internal pointer. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
		/* Move the per-table iterator count from the old table to ht. */
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
621
zend_hash_iterator_pos_ex(uint32_t idx,zval * array)622 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
623 {
624 HashTable *ht = Z_ARRVAL_P(array);
625 HashTableIterator *iter = EG(ht_iterators) + idx;
626
627 ZEND_ASSERT(idx != (uint32_t)-1);
628 if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
629 if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
630 && EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
631 HT_DEC_ITERATORS_COUNT(iter->ht);
632 }
633 SEPARATE_ARRAY(array);
634 ht = Z_ARRVAL_P(array);
635 if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
636 HT_INC_ITERATORS_COUNT(ht);
637 }
638 iter->ht = ht;
639 iter->pos = _zend_hash_get_current_pos(ht);
640 }
641 return iter->pos;
642 }
643
/* Release iterator slot idx: decrement its table's iterator count, free the
 * slot, delete any tracked copies, and shrink the used-slot watermark. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);

	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;

	if (UNEXPECTED(iter->next_copy != idx)) {
		zend_hash_remove_iterator_copies(idx);
	}

	/* If this was the highest used slot, lower the watermark past any
	 * trailing free slots. */
	if (idx == EG(ht_iterators_used) - 1) {
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
668
_zend_hash_iterators_remove(HashTable * ht)669 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
670 {
671 HashTableIterator *iter = EG(ht_iterators);
672 HashTableIterator *end = iter + EG(ht_iterators_used);
673
674 while (iter != end) {
675 if (iter->ht == ht) {
676 iter->ht = HT_POISONED_PTR;
677 }
678 iter++;
679 }
680 }
681
/* Fast-path wrapper: only scan the iterator array when ht has iterators. */
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		_zend_hash_iterators_remove(ht);
	}
}
688
zend_hash_iterators_lower_pos(HashTable * ht,HashPosition start)689 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
690 {
691 HashTableIterator *iter = EG(ht_iterators);
692 HashTableIterator *end = iter + EG(ht_iterators_used);
693 HashPosition res = ht->nNumUsed;
694
695 while (iter != end) {
696 if (iter->ht == ht) {
697 if (iter->pos >= start && iter->pos < res) {
698 res = iter->pos;
699 }
700 }
701 iter++;
702 }
703 return res;
704 }
705
_zend_hash_iterators_update(HashTable * ht,HashPosition from,HashPosition to)706 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
707 {
708 HashTableIterator *iter = EG(ht_iterators);
709 HashTableIterator *end = iter + EG(ht_iterators_used);
710
711 while (iter != end) {
712 if (iter->ht == ht && iter->pos == from) {
713 iter->pos = to;
714 }
715 iter++;
716 }
717 }
718
zend_hash_iterators_advance(HashTable * ht,HashPosition step)719 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
720 {
721 HashTableIterator *iter = EG(ht_iterators);
722 HashTableIterator *end = iter + EG(ht_iterators_used);
723
724 while (iter != end) {
725 if (iter->ht == ht) {
726 iter->pos += step;
727 }
728 iter++;
729 }
730 }
731
/* Hash must be known and precomputed before */
/* Find the bucket for a zend_string key, or NULL. Interned strings are
 * compared by pointer first (fast path) before falling back to hash +
 * content comparison along the collision chain. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, const zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	ZEND_ASSERT(ZSTR_H(key) != 0 && "Hash must be known");

	arData = ht->arData;
	nIndex = ZSTR_H(key) | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);

	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}

	/* Walk the collision chain; each iteration checks content equality for
	 * the current bucket, then the pointer fast path for the next one. */
	while (1) {
		if (p->h == ZSTR_H(key) &&
			EXPECTED(p->key) &&
			zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
769
/* Find the bucket for a (char*, len) key with precomputed hash h, or NULL.
 * Only string-keyed buckets (p->key != NULL) can match. */
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if ((p->h == h)
			 && p->key
			 && zend_string_equals_cstr(p->key, str, len)) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
791
zend_hash_index_find_bucket(const HashTable * ht,zend_ulong h)792 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
793 {
794 uint32_t nIndex;
795 uint32_t idx;
796 Bucket *p, *arData;
797
798 arData = ht->arData;
799 nIndex = h | ht->nTableMask;
800 idx = HT_HASH_EX(arData, nIndex);
801 while (idx != HT_INVALID_IDX) {
802 ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
803 p = HT_HASH_TO_BUCKET_EX(arData, idx);
804 if (p->h == h && !p->key) {
805 return p;
806 }
807 idx = Z_NEXT(p->val);
808 }
809 return NULL;
810 }
811
/* Core insert/update for zend_string keys. `flag` combines HASH_ADD /
 * HASH_ADD_NEW / HASH_UPDATE / HASH_UPDATE_INDIRECT / HASH_LOOKUP.
 * Returns the stored zval, or NULL when an ADD finds an existing live entry. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_string_hash_val(key);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* First insertion: the table is empty, so the key cannot exist. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces conversion from packed to mixed. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
		/* Debug builds also probe on HASH_ADD_NEW to assert the caller's
		 * "key is new" promise (see the assertion below). */
		p = zend_hash_find_bucket(ht, key);

		if (p) {
			zval *data;

			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only overwrite an UNDEF value
				 * reached through an INDIRECT slot. */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			/* Destroy the old value before moving the new one in. */
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	/* Link the new bucket at the head of its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	if (flag & HASH_LOOKUP) {
		/* LOOKUP creates missing entries initialized to NULL. */
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
892
/* Core insert/update for (char*, len) keys with precomputed hash h.
 * Same flag semantics as _zend_hash_add_or_update_i(); on insertion a new
 * zend_string key is created (persistent for persistent tables). */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* First insertion: the table is empty, so the key cannot exist. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces conversion from packed to mixed. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);

		if (p) {
			zval *data;

			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only overwrite an UNDEF value
				 * reached through an INDIRECT slot. */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			/* Destroy the old value before moving the new one in. */
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	/* Materialize the key; a newly created string is never interned. */
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
#if ZEND_RC_DEBUG
	if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) {
		GC_MAKE_PERSISTENT_LOCAL(key);
	}
#endif
	/* Cache the caller-supplied hash on the new string as well. */
	p->h = ZSTR_H(key) = h;
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	if (flag & HASH_LOOKUP) {
		/* LOOKUP creates missing entries initialized to NULL. */
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	/* Link the new bucket at the head of its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);

	return &p->val;
}
972
zend_hash_add_or_update(HashTable * ht,zend_string * key,zval * pData,uint32_t flag)973 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
974 {
975 if (flag == HASH_ADD) {
976 return zend_hash_add(ht, key, pData);
977 } else if (flag == HASH_ADD_NEW) {
978 return zend_hash_add_new(ht, key, pData);
979 } else if (flag == HASH_UPDATE) {
980 return zend_hash_update(ht, key, pData);
981 } else {
982 ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
983 return zend_hash_update_ind(ht, key, pData);
984 }
985 }
986
/* Insert `key` => `pData` only if the key does not exist yet; returns the
 * stored zval slot, or NULL if the key is already present (HASH_ADD mode). */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}
991
/* Insert or overwrite `key` => `pData` (existing value is destructed first
 * inside the common helper); always returns the stored zval slot. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}
996
/* Like zend_hash_update(), but if the existing value is IS_INDIRECT the
 * update is applied through the indirection (symbol-table semantics). */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
1001
/* Insert `key` => `pData` when the caller guarantees the key is not present;
 * skips the duplicate lookup (checked only via assertions in debug builds). */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
1006
/* Find `key`, inserting an IS_NULL placeholder if it is missing; always
 * returns a valid slot (never NULL), so no data pointer is needed. */
ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
{
	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
}
1011
zend_hash_str_add_or_update(HashTable * ht,const char * str,size_t len,zval * pData,uint32_t flag)1012 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
1013 {
1014 if (flag == HASH_ADD) {
1015 return zend_hash_str_add(ht, str, len, pData);
1016 } else if (flag == HASH_ADD_NEW) {
1017 return zend_hash_str_add_new(ht, str, len, pData);
1018 } else if (flag == HASH_UPDATE) {
1019 return zend_hash_str_update(ht, str, len, pData);
1020 } else {
1021 ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
1022 return zend_hash_str_update_ind(ht, str, len, pData);
1023 }
1024 }
1025
zend_hash_str_update(HashTable * ht,const char * str,size_t len,zval * pData)1026 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
1027 {
1028 zend_ulong h = zend_hash_func(str, len);
1029
1030 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
1031 }
1032
zend_hash_str_update_ind(HashTable * ht,const char * str,size_t len,zval * pData)1033 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
1034 {
1035 zend_ulong h = zend_hash_func(str, len);
1036
1037 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
1038 }
1039
zend_hash_str_add(HashTable * ht,const char * str,size_t len,zval * pData)1040 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
1041 {
1042 zend_ulong h = zend_hash_func(str, len);
1043
1044 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
1045 }
1046
zend_hash_str_add_new(HashTable * ht,const char * str,size_t len,zval * pData)1047 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
1048 {
1049 zend_ulong h = zend_hash_func(str, len);
1050
1051 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
1052 }
1053
zend_hash_index_add_empty_element(HashTable * ht,zend_ulong h)1054 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
1055 {
1056 zval dummy;
1057
1058 ZVAL_NULL(&dummy);
1059 return zend_hash_index_add(ht, h, &dummy);
1060 }
1061
zend_hash_add_empty_element(HashTable * ht,zend_string * key)1062 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
1063 {
1064 zval dummy;
1065
1066 ZVAL_NULL(&dummy);
1067 return zend_hash_add(ht, key, &dummy);
1068 }
1069
zend_hash_str_add_empty_element(HashTable * ht,const char * str,size_t len)1070 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
1071 {
1072 zval dummy;
1073
1074 ZVAL_NULL(&dummy);
1075 return zend_hash_str_add(ht, str, len, &dummy);
1076 }
1077
/* Common worker for all integer-key insert/update/lookup operations.
 * `flag` is a combination of HASH_ADD / HASH_ADD_NEW / HASH_ADD_NEXT /
 * HASH_UPDATE / HASH_LOOKUP. Returns the stored slot, or NULL when an
 * ADD hits an existing key. Handles four table states: packed, packed
 * needing growth/conversion, uninitialized, and real hash. */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	zval *zv;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* For "append" mode an empty table has nNextFreeElement == ZEND_LONG_MIN;
	 * start appending at 0 instead. */
	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
		h = 0;
	}

	if (HT_IS_PACKED(ht)) {
		/* HASH_ADD_NEW|HASH_ADD_NEXT guarantees h == nNumUsed, so the
		 * occupied-slot check can be skipped in that mode. */
		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
		 && h < ht->nNumUsed) {
			zv = ht->arPacked + h;
			if (Z_TYPE_P(zv) != IS_UNDEF) {
				if (flag & HASH_LOOKUP) {
					return zv;
				}
replace:
				/* Plain ADD never overwrites an existing value. */
				if (flag & HASH_ADD) {
					return NULL;
				}
				if (ht->pDestructor) {
					ht->pDestructor(zv);
				}
				ZVAL_COPY_VALUE(zv, pData);
				return zv;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			zv = ht->arPacked + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				if (h > ht->nNumUsed) {
					/* Fill the gap [nNumUsed, h) with UNDEF holes. */
					zval *q = ht->arPacked + ht->nNumUsed;
					while (q != zv) {
						ZVAL_UNDEF(q);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			ht->nNumOfElements++;
			if (flag & HASH_LOOKUP) {
				ZVAL_NULL(zv);
			} else {
				ZVAL_COPY_VALUE(zv, pData);
			}

			return zv;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* Key is moderately beyond the current size and the table is
			 * reasonably dense: stay packed and grow. */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			/* Too sparse to stay packed: convert to a real hash (pre-doubling
			 * the size if the data area is already full). */
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		/* Lazily allocate: packed if the key fits, mixed otherwise. */
		if (h < ht->nTableSize) {
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		/* Real hash table: look for an existing bucket unless the caller
		 * promised the key is new (still verified in debug builds). */
		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				if (flag & HASH_LOOKUP) {
					return &p->val;
				}
				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
				zv = &p->val;
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);	/* If the Hash table is full, resize it */
	}

	/* Append a fresh bucket and link it at the head of its collision chain. */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	/* Advance nNextFreeElement past this key, saturating at ZEND_LONG_MAX. */
	if ((zend_long)h >= ht->nNextFreeElement) {
		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL; /* integer keys have no string key */
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
1185
zend_hash_index_add_or_update(HashTable * ht,zend_ulong h,zval * pData,uint32_t flag)1186 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1187 {
1188 if (flag == HASH_ADD) {
1189 return zend_hash_index_add(ht, h, pData);
1190 } else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1191 return zend_hash_index_add_new(ht, h, pData);
1192 } else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1193 ZEND_ASSERT(h == ht->nNextFreeElement);
1194 return zend_hash_next_index_insert(ht, pData);
1195 } else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1196 ZEND_ASSERT(h == ht->nNextFreeElement);
1197 return zend_hash_next_index_insert_new(ht, pData);
1198 } else {
1199 ZEND_ASSERT(flag == HASH_UPDATE);
1200 return zend_hash_index_update(ht, h, pData);
1201 }
1202 }
1203
/* Insert under integer key `h` only if it does not exist yet; returns the
 * stored slot, or NULL when the key is already present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
1208
/* Insert under integer key `h` when the caller guarantees the key is absent;
 * skips the duplicate lookup (verified only in debug builds). */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
1213
/* Insert or overwrite the value under integer key `h`; always returns the
 * stored slot. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
1218
/* Append `pData` under the next free integer key (PHP `$a[] = ...`);
 * returns NULL if that slot unexpectedly exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
1223
/* Append `pData` under the next free integer key when the caller guarantees
 * no collision; skips the duplicate check. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
1228
/* Find integer key `h`, inserting an IS_NULL placeholder if it is missing;
 * always returns a valid slot (never NULL). */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
{
	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
}
1233
/* Rename an existing bucket `b` (already stored in `ht`) to string key `key`
 * without moving its position in insertion order. Returns the bucket's value
 * slot, or NULL if a *different* bucket already owns `key`. The bucket is
 * unlinked from its old collision chain and relinked into the new one,
 * keeping each chain sorted by descending bucket index. */
ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx, i;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!HT_IS_PACKED(ht));

	/* Ensure the key's hash is computed/cached before any lookup. */
	(void)zend_string_hash_val(key);
	p = zend_hash_find_bucket(ht, key);
	if (UNEXPECTED(p)) {
		/* Key already present: only a no-op rename (same bucket) succeeds. */
		return (p == b) ? &p->val : NULL;
	}

	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}

	arData = ht->arData;

	/* del from hash */
	idx = HT_IDX_TO_HASH(b - arData);
	nIndex = b->h | ht->nTableMask;
	i = HT_HASH_EX(arData, nIndex);
	if (i == idx) {
		/* Bucket is the chain head: point the head at its successor. */
		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
	} else {
		/* Walk the chain to the predecessor of `b` and bypass it. */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(p->val) = Z_NEXT(b->val);
	}
	zend_string_release(b->key); /* drop the old key's reference */

	/* add to hash */
	idx = b - arData;
	b->key = key;
	b->h = ZSTR_H(key);
	nIndex = b->h | ht->nTableMask;
	idx = HT_IDX_TO_HASH(idx);
	i = HT_HASH_EX(arData, nIndex);
	if (i == HT_INVALID_IDX || i < idx) {
		/* Empty chain, or our index is the largest: become the new head. */
		Z_NEXT(b->val) = i;
		HT_HASH_EX(arData, nIndex) = idx;
	} else {
		/* Insert at the position that keeps the chain ordered by
		 * descending index. */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(b->val) = Z_NEXT(p->val);
		Z_NEXT(p->val) = idx;
	}
	return &b->val;
}
1294
/* Make room in a full (non-packed) hash table: if there are enough deleted
 * holes, compact in place via rehash; otherwise double the allocation and
 * rehash into it; if already at HT_MAX_SIZE, raise a fatal error. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableSize = nSize;
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* Only the bucket array is copied; the hash slots are rebuilt from
		 * scratch by zend_hash_rehash() below. */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
1322
/* Rebuild all collision chains from the bucket array and, if holes
 * (IS_UNDEF buckets) are found, compact the array in place, updating the
 * internal pointer and any active external iterators to the moved
 * positions. Assumes a non-packed table. */
ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		/* Nothing to relink; just reset state if storage exists. */
		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
		}
		return;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Fast path: every bucket is live, just relink the chains. */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* First hole found at i: from here on, compact live buckets
				 * down to position j (initially == i). */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* Same compaction, but also retarget external iterators
					 * from old positions to the compacted ones. */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
}
1414
/* Delete slot `zv` (at hash-encoded position `idx`) from a packed table:
 * fix up the internal pointer and iterators, trim trailing UNDEF holes,
 * then destruct the value. `idx` is in HT_IDX_TO_HASH() encoding. */
static zend_always_inline void _zend_hash_packed_del_val(HashTable *ht, uint32_t idx, zval *zv)
{
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		uint32_t new_idx;

		/* Advance past the deleted slot to the next live element (or end). */
		new_idx = idx;
		while (1) {
			new_idx++;
			if (new_idx >= ht->nNumUsed) {
				break;
			} else if (Z_TYPE(ht->arPacked[new_idx]) != IS_UNDEF) {
				break;
			}
		}
		if (ht->nInternalPointer == idx) {
			ht->nInternalPointer = new_idx;
		}
		zend_hash_iterators_update(ht, idx, new_idx);
	}
	if (ht->nNumUsed - 1 == idx) {
		/* Deleting the last used slot: shrink nNumUsed over any trailing holes. */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arPacked[ht->nNumUsed-1]) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
	}
	if (ht->pDestructor) {
		/* UNDEF the slot *before* running the destructor so reentrant
		 * accesses never see a half-destroyed value. */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, zv);
		ZVAL_UNDEF(zv);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(zv);
	}
}
1451
/* Core bucket deletion for non-packed tables: unlink `p` from its collision
 * chain (`prev` is its chain predecessor, or NULL if `p` is the head), fix
 * up the internal pointer/iterators, trim trailing holes, and destruct the
 * value. `idx` is in HT_IDX_TO_HASH() encoding; the key must already have
 * been released by the caller. */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	if (prev) {
		Z_NEXT(prev->val) = Z_NEXT(p->val);
	} else {
		/* `p` is the chain head: retarget the hash slot. */
		HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		uint32_t new_idx;

		/* Advance past the deleted bucket to the next live element (or end). */
		new_idx = idx;
		while (1) {
			new_idx++;
			if (new_idx >= ht->nNumUsed) {
				break;
			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
				break;
			}
		}
		if (ht->nInternalPointer == idx) {
			ht->nInternalPointer = new_idx;
		}
		zend_hash_iterators_update(ht, idx, new_idx);
	}
	if (ht->nNumUsed - 1 == idx) {
		/* Deleting the last used bucket: shrink nNumUsed over trailing holes. */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
	}
	if (ht->pDestructor) {
		/* UNDEF the slot *before* running the destructor so reentrant
		 * accesses never see a half-destroyed value. */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
1493
/* Delete bucket `p` when its chain predecessor is unknown: walk the
 * collision chain to find it, release the string key (if any), then
 * delegate to _zend_hash_del_el_ex(). `idx` is HT_IDX_TO_HASH() encoded. */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;
	uint32_t nIndex;
	uint32_t i;

	nIndex = p->h | ht->nTableMask;
	i = HT_HASH(ht, nIndex);

	if (i != idx) {
		/* Not the chain head: walk until the predecessor points at `idx`. */
		prev = HT_HASH_TO_BUCKET(ht, i);
		while (Z_NEXT(prev->val) != idx) {
			i = Z_NEXT(prev->val);
			prev = HT_HASH_TO_BUCKET(ht, i);
		}
	}

	if (p->key) {
		zend_string_release(p->key);
		p->key = NULL;
	}
	_zend_hash_del_el_ex(ht, idx, p, prev);
}
1517
/* Public wrapper: delete the slot `zv` (which must point into ht->arPacked)
 * from a packed table. */
ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(HT_IS_PACKED(ht));
	_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(zv - ht->arPacked), zv);
}
1525
1526
/* Public wrapper: delete bucket `p` (which must point into ht->arData)
 * from a non-packed table. */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!HT_IS_PACKED(ht));
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
1534
/* Delete the entry stored under string key `key`. Returns SUCCESS if an
 * entry was removed, FAILURE if the key was not found. Tracks the chain
 * predecessor while walking so the unlink needs no second pass. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* Fast path: identical zend_string pointer; otherwise compare hash
		 * then content. */
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			zend_string_release(p->key);
			p->key = NULL;
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1566
/* Delete the entry under string key `key`, honoring IS_INDIRECT values
 * (symbol-table semantics): an indirect slot is not removed from the table;
 * only the referenced value is destructed and marked UNDEF, and the table
 * is flagged HASH_FLAG_HAS_EMPTY_IND. Returns SUCCESS on removal/clearing,
 * FAILURE if the key is absent or the indirect target is already UNDEF. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* UNDEF before destructing so reentrant accesses
						 * never see a half-destroyed value. */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				/* Plain value: remove the bucket entirely. */
				zend_string_release(p->key);
				p->key = NULL;
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1616
zend_hash_str_del_ind(HashTable * ht,const char * str,size_t len)1617 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1618 {
1619 zend_ulong h;
1620 uint32_t nIndex;
1621 uint32_t idx;
1622 Bucket *p;
1623 Bucket *prev = NULL;
1624
1625 IS_CONSISTENT(ht);
1626 HT_ASSERT_RC1(ht);
1627
1628 h = zend_inline_hash_func(str, len);
1629 nIndex = h | ht->nTableMask;
1630
1631 idx = HT_HASH(ht, nIndex);
1632 while (idx != HT_INVALID_IDX) {
1633 p = HT_HASH_TO_BUCKET(ht, idx);
1634 if ((p->h == h)
1635 && p->key
1636 && zend_string_equals_cstr(p->key, str, len)) {
1637 if (Z_TYPE(p->val) == IS_INDIRECT) {
1638 zval *data = Z_INDIRECT(p->val);
1639
1640 if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1641 return FAILURE;
1642 } else {
1643 if (ht->pDestructor) {
1644 ht->pDestructor(data);
1645 }
1646 ZVAL_UNDEF(data);
1647 HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1648 }
1649 } else {
1650 zend_string_release(p->key);
1651 p->key = NULL;
1652 _zend_hash_del_el_ex(ht, idx, p, prev);
1653 }
1654 return SUCCESS;
1655 }
1656 prev = p;
1657 idx = Z_NEXT(p->val);
1658 }
1659 return FAILURE;
1660 }
1661
/* Delete the entry stored under raw (char*, len) key. Returns SUCCESS if an
 * entry was removed, FAILURE if the key was not found. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* Compare cached hash first, then key content. */
		if ((p->h == h)
		 && p->key
		 && zend_string_equals_cstr(p->key, str, len)) {
			zend_string_release(p->key);
			p->key = NULL;
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1692
/* Delete the entry stored under integer key `h`. Handles both packed
 * (direct index) and hashed (chain walk) layouts. Returns SUCCESS if an
 * entry was removed, FAILURE otherwise. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		/* Packed layout: the key is the array index. */
		if (h < ht->nNumUsed) {
			zval *zv = ht->arPacked + h;
			if (Z_TYPE_P(zv) != IS_UNDEF) {
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(h), zv);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* Integer keys have p->key == NULL; string keys never match here. */
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1727
/* Destroy all elements (running the destructor and releasing string keys)
 * and free the table's data storage. The HashTable struct itself is NOT
 * freed. The branches are specialized on packed/hashed layout, destructor
 * presence, interned-only keys, and absence of holes, to avoid per-element
 * checks that are known statically. */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			if (ht->pDestructor) {
				zval *zv = ht->arPacked;
				zval *end = zv + ht->nNumUsed;

				/* Mark destroying so reentrant access is caught in debug. */
				SET_INCONSISTENT(HT_IS_DESTROYING);
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(zv);
					} while (++zv != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
							ht->pDestructor(zv);
						}
					} while (++zv != end);
				}
				SET_INCONSISTENT(HT_DESTROYED);
			}
			zend_hash_iterators_remove(ht);
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (ht->pDestructor) {
				SET_INCONSISTENT(HT_IS_DESTROYING);

				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					/* All keys interned: no per-key release needed. */
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(&p->val);
						} while (++p != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
								ht->pDestructor(&p->val);
							}
						} while (++p != end);
					}
				} else if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}

				SET_INCONSISTENT(HT_DESTROYED);
			} else {
				/* No destructor: only string keys need releasing. */
				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				}
			}
			zend_hash_iterators_remove(ht);
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* Never allocated: nothing to free. */
		return;
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
1808
/* Destroy a refcounted PHP array: destruct all values, free the data
 * storage, AND free the HashTable struct itself (unlike zend_hash_destroy).
 * Optimized for the common case where the destructor is ZVAL_PTR_DTOR,
 * which is inlined via i_zval_ptr_dtor(). */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_IS_PACKED(ht)) {
			zval *zv = ht->arPacked;
			zval *end = zv + ht->nNumUsed;

			do {
				i_zval_ptr_dtor(zv);
			} while (++zv != end);
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				/* All keys interned: values only. */
				do {
					i_zval_ptr_dtor(&p->val);
				} while (++p != end);
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						/* Array keys are never persistent (flag 0). */
						zend_string_release_ex(p->key, 0);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						i_zval_ptr_dtor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release_ex(p->key, 0);
						}
					}
				} while (++p != end);
			}
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* Never allocated: skip the data free. */
		goto free_ht;
	}
	SET_INCONSISTENT(HT_DESTROYED);
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	zend_hash_iterators_remove(ht);
	FREE_HASHTABLE(ht);
}
1869
/* Remove all elements but keep the table allocated and usable: destruct
 * values, release string keys, reset hash slots and counters. The storage
 * is retained so the table can be refilled without reallocation. */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			zval *zv = ht->arPacked;
			zval *end = zv + ht->nNumUsed;

			if (ht->pDestructor) {
				/* NOTE(review): this extra HT_HAS_STATIC_KEYS_ONLY guard means
				 * destructors are skipped for a packed table whose
				 * HASH_FLAG_HAS_EMPTY_IND flag is set (or whose
				 * HASH_FLAG_STATIC_KEYS flag is clear). Packed tables have no
				 * string keys, so the guard is presumably always true here —
				 * verify, since a false value would leak the elements. */
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(zv);
						} while (++zv != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
								ht->pDestructor(zv);
							}
						} while (++zv != end);
					}
				}
			}
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (ht->pDestructor) {
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					/* All keys interned: no per-key release needed. */
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(&p->val);
						} while (++p != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
								ht->pDestructor(&p->val);
							}
						} while (++p != end);
					}
				} else if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			} else {
				/* No destructor: only string keys need releasing. */
				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				}
			}
			/* Only hashed layout has hash slots to reset. */
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
1946
/* Clean a symbol table: like zend_hash_clean() but specialized for tables
 * whose destructor is the zval dtor (values destructed via the inlined
 * i_zval_ptr_dtor) and which are never packed. Storage is retained. */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		ZEND_ASSERT(!HT_IS_PACKED(ht));
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			/* All keys interned: values only. */
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
1986
/* Destroy the table by deleting elements one at a time in forward order
 * (each deletion fully unlinks and destructs before the next), then free
 * the data storage. Used when destructors may interact with the table. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked;

		for (idx = 0; idx < ht->nNumUsed; idx++, zv++) {
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData;

		for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}
	/* Free the storage only if it was ever allocated. */
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2015
/* Like zend_hash_graceful_destroy(), but deletes elements in reverse
 * insertion order (last inserted destroyed first), then frees the data
 * storage. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			zv--;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			p--;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}

	/* Free the storage only if it was ever allocated. */
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2050
/* This is used to iterate over the elements of a hashtable and selectively
 * delete certain entries. apply_func() receives each entry's data and
 * decides whether the entry should be deleted or whether iteration should
 * be stopped. The following return codes are possible:
 * ZEND_HASH_APPLY_KEEP   - keep the element and continue
 * ZEND_HASH_APPLY_STOP   - stop iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element (combinable with ZEND_HASH_APPLY_STOP)
 */
2059
/* Call apply_func() on every element's value. If the callback's result has
 * ZEND_HASH_APPLY_REMOVE set, the element is deleted; if it has
 * ZEND_HASH_APPLY_STOP set, iteration ends after handling that element. */
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deleting mutates the table, so it must not be shared. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2098
2099
/* Like zend_hash_apply(), but passes one extra opaque argument through to
 * the callback. Same ZEND_HASH_APPLY_* result protocol. */
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			result = apply_func(zv, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deleting mutates the table, so it must not be shared. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			result = apply_func(&p->val, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2136
2137
/* Like zend_hash_apply(), but passes a variable argument list plus the
 * element's key (as a zend_hash_key) to the callback. Note: va_start/va_end
 * are re-done for every element because the callback consumes the va_list. */
ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	uint32_t idx;
	va_list args;
	zend_hash_key hash_key;
	int result;

	IS_CONSISTENT(ht);

	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			va_start(args, num_args);
			/* Packed tables have implicit integer keys equal to the index. */
			hash_key.h = idx;
			hash_key.key = NULL;

			result = apply_func(zv, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deleting mutates the table, so it must not be shared. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			va_start(args, num_args);
			hash_key.h = p->h;
			hash_key.key = p->key;

			result = apply_func(&p->val, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	}
}
2191
2192
/* Same as zend_hash_apply(), but visits the elements in reverse slot order
 * (last used slot first). */
ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv;

		while (idx > 0) {
			idx--;
			zv = ht->arPacked + idx;
			/* Skip holes. */
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;

			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deleting mutates the table, so it must not be shared. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		Bucket *p;

		while (idx > 0) {
			idx--;
			p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;

			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2239
2240
/* Copy every element of "source" into "target" (updating entries that share
 * a key). pCopyConstructor, if non-NULL, is invoked on each newly inserted
 * value so it can deep-copy/addref it. */
ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
{
	uint32_t idx;
	zval *new_entry, *data;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (HT_IS_PACKED(source)) {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			zval *zv = source->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;

			/* Packed source: the slot index is the integer key. */
			new_entry = zend_hash_index_update(target, idx, zv);
			if (pCopyConstructor) {
				pCopyConstructor(new_entry);
			}
		}
		return;
	}

	for (idx = 0; idx < source->nNumUsed; idx++) {
		Bucket *p = source->arData + idx;

		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;

		/* INDIRECT element may point to UNDEF-ined slots */
		data = &p->val;
		if (Z_TYPE_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
				continue;
			}
		}
		/* String key vs. integer key dispatch. */
		if (p->key) {
			new_entry = zend_hash_update(target, p->key, data);
		} else {
			new_entry = zend_hash_index_update(target, p->h, data);
		}
		if (pCopyConstructor) {
			pCopyConstructor(new_entry);
		}
	}
}
2286
2287
/* Copy a single value from "data" to "dest" during array duplication.
 * Returns 0 (element must be skipped) when the value is an UNDEF hole or an
 * INDIRECT pointing at an UNDEF slot; returns 1 on success.  References with
 * refcount 1 are unwrapped unless they wrap the source array itself (which
 * must stay a reference to keep self-references intact). */
static zend_always_inline bool zend_array_dup_value(HashTable *source, HashTable *target, zval *data, zval *dest, bool packed, bool with_holes)
{
	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				/* Unwrap a singly-held reference and copy its payload. */
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break;
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(dest, data);

	return 1;
}
2324
/* Duplicate one bucket (value + key) from p into q at target slot "idx".
 * For non-packed tables this also links q into the target's hash chain.
 * Returns 0 when the value must be skipped (see zend_array_dup_value). */
static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
{
	if (!zend_array_dup_value(source, target, &p->val, &q->val, packed, with_holes)) {
		return 0;
	}

	if (!packed) {
		uint32_t nIndex;

		q->h = p->h;
		q->key = p->key;
		/* Interned (static) keys don't need refcounting. */
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}

		/* Prepend the new bucket to its hash chain in the target. */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
2346
2347 // We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back
// We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back
static void zend_array_dup_ht_iterators(HashTable *source, HashTable *target) {
	uint32_t iter_index = 0;
	uint32_t end_index = EG(ht_iterators_used);

	while (iter_index != end_index) {
		HashTableIterator *iter = &EG(ht_iterators)[iter_index];
		if (iter->ht == source) {
			/* Register a duplicate iterator on the target at the same position
			 * and splice it into the source iterator's next_copy chain. */
			uint32_t copy_idx = zend_hash_iterator_add(target, iter->pos);
			/* Refetch iter because the memory may be reallocated. */
			iter = &EG(ht_iterators)[iter_index];
			HashTableIterator *copy_iter = EG(ht_iterators) + copy_idx;
			copy_iter->next_copy = iter->next_copy;
			iter->next_copy = copy_idx;
		}
		iter_index++;
	}
}
2365
/* Duplicate all values of a packed source into an equally-sized packed
 * target.  When with_holes is set, holes are preserved as UNDEF slots so
 * indices stay aligned.  Assumes nNumUsed > 0 (do/while). */
static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
{
	zval *p = source->arPacked;
	zval *q = target->arPacked;
	zval *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_value(source, target, p, q, 1, with_holes)) {
			if (with_holes) {
				/* Keep the hole so subsequent indices don't shift. */
				ZVAL_UNDEF(q);
			}
		}
		p++; q++;
	} while (p != end);

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}
}
2385
/* Duplicate all buckets of a non-packed source into the target, compacting
 * out any skipped elements.  The slow path (entered on the first skipped
 * element) keeps two indices: "idx" walks the source, "target_idx" walks the
 * compacted target; it also repairs nInternalPointer and, if the target has
 * iterators, their positions.  Returns the number of buckets written. */
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			/* First skipped element: switch to the compacting slow path. */
			uint32_t target_idx = idx;

			idx++; p++;
			if (EXPECTED(!HT_HAS_ITERATORS(target))) {
				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							/* Remap the internal pointer to the compacted slot. */
							target->nInternalPointer = target_idx;
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			} else {
				target->nNumUsed = source->nNumUsed;
				uint32_t iter_pos = zend_hash_iterators_lower_pos(target, idx);

				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							target->nInternalPointer = target_idx;
						}
						/* Shift any iterator positions that fell into the
						 * compacted-away range onto the new slot index. */
						if (UNEXPECTED(idx >= iter_pos)) {
							do {
								zend_hash_iterators_update(target, iter_pos, target_idx);
								iter_pos = zend_hash_iterators_lower_pos(target, iter_pos + 1);
							} while (iter_pos < idx);
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx;
}
2438
/* Create a full (refcount-1) duplicate of "source".  Four strategies:
 * empty source, immutable source (flat memcpy), packed source, and the
 * general hashed case (element-by-element with possible compaction). */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = GC_ARRAY;

	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* Empty: share the global uninitialized bucket, allocate nothing. */
		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		target->nTableSize = HT_MIN_SIZE;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* Immutable source: values need no refcounting, plain memcpy works. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		if (HT_IS_PACKED(source)) {
			HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_PACKED_USED_SIZE(source));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
		}
	} else if (HT_IS_PACKED(source)) {
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		/* Reset an out-of-range internal pointer to the start. */
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		/* Specialized copies: static-keys and without-holes variants avoid
		 * per-element refcounting / hole checks respectively. */
		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		/* The copy may have been compacted; use the returned count. */
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
2526
/* Build a new packed array containing the values of "source" with fresh
 * sequential integer keys.  Singly-held references are unwrapped; all
 * copied values are addref'd. */
ZEND_API HashTable* zend_array_to_list(HashTable *source)
{
	HashTable *result = _zend_new_array(zend_hash_num_elements(source));
	zend_hash_real_init_packed(result);

	ZEND_HASH_FILL_PACKED(result) {
		zval *entry;

		ZEND_HASH_FOREACH_VAL(source, entry) {
			if (UNEXPECTED(Z_ISREF_P(entry) && Z_REFCOUNT_P(entry) == 1)) {
				entry = Z_REFVAL_P(entry);
			}
			Z_TRY_ADDREF_P(entry);
			ZEND_HASH_FILL_ADD(entry);
		} ZEND_HASH_FOREACH_END();
	} ZEND_HASH_FILL_END();

	return result;
}
2546
2547
/* Merge all elements of "source" into "target".  With overwrite set,
 * existing keys in target are replaced (update); otherwise they are left
 * untouched (add).  pCopyConstructor, if non-NULL, runs on every value that
 * was actually inserted/updated. */
ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
{
	uint32_t idx;
	Bucket *p;
	zval *t, *s;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (overwrite) {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue;
				}
				t = zend_hash_index_update(target, idx, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			/* Follow INDIRECT slots before checking for holes. */
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_update(target, p->h, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	} else {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue;
				}
				/* zend_hash_index_add() returns NULL if the key exists. */
				t = zend_hash_index_add(target, idx, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_add(target, p->h, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	}
}
2632
2633
zend_hash_replace_checker_wrapper(HashTable * target,zval * source_data,zend_ulong h,zend_string * key,void * pParam,merge_checker_func_t merge_checker_func)2634 static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, zend_ulong h, zend_string *key, void *pParam, merge_checker_func_t merge_checker_func)
2635 {
2636 zend_hash_key hash_key;
2637
2638 hash_key.h = h;
2639 hash_key.key = key;
2640 return merge_checker_func(target, source_data, &hash_key, pParam);
2641 }
2642
2643
/* Like zend_hash_merge(), but consults pMergeSource for every element and
 * only copies those it approves.  Only supports non-packed sources (every
 * copied element is inserted by its string key). */
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
	uint32_t idx;
	Bucket *p;
	zval *t;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	ZEND_ASSERT(!HT_IS_PACKED(source));
	for (idx = 0; idx < source->nNumUsed; idx++) {
		p = source->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		if (zend_hash_replace_checker_wrapper(target, &p->val, p->h, p->key, pParam, pMergeSource)) {
			t = zend_hash_update(target, p->key, &p->val);
			if (pCopyConstructor) {
				pCopyConstructor(t);
			}
		}
	}
}
2666
2667
2668 /* Returns the hash table data if found and NULL if not. */
zend_hash_find(const HashTable * ht,zend_string * key)2669 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2670 {
2671 Bucket *p;
2672
2673 IS_CONSISTENT(ht);
2674
2675 (void)zend_string_hash_val(key);
2676 p = zend_hash_find_bucket(ht, key);
2677 return p ? &p->val : NULL;
2678 }
2679
zend_hash_find_known_hash(const HashTable * ht,const zend_string * key)2680 ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, const zend_string *key)
2681 {
2682 Bucket *p;
2683
2684 IS_CONSISTENT(ht);
2685
2686 p = zend_hash_find_bucket(ht, key);
2687 return p ? &p->val : NULL;
2688 }
2689
zend_hash_str_find(const HashTable * ht,const char * str,size_t len)2690 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2691 {
2692 zend_ulong h;
2693 Bucket *p;
2694
2695 IS_CONSISTENT(ht);
2696
2697 h = zend_inline_hash_func(str, len);
2698 p = zend_hash_str_find_bucket(ht, str, len, h);
2699 return p ? &p->val : NULL;
2700 }
2701
zend_hash_index_find(const HashTable * ht,zend_ulong h)2702 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2703 {
2704 Bucket *p;
2705
2706 IS_CONSISTENT(ht);
2707
2708 if (HT_IS_PACKED(ht)) {
2709 if (h < ht->nNumUsed) {
2710 zval *zv = ht->arPacked + h;
2711
2712 if (Z_TYPE_P(zv) != IS_UNDEF) {
2713 return zv;
2714 }
2715 }
2716 return NULL;
2717 }
2718
2719 p = zend_hash_index_find_bucket(ht, h);
2720 return p ? &p->val : NULL;
2721 }
2722
_zend_hash_index_find(const HashTable * ht,zend_ulong h)2723 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2724 {
2725 Bucket *p;
2726
2727 IS_CONSISTENT(ht);
2728 ZEND_ASSERT(!HT_IS_PACKED(ht));
2729
2730 p = zend_hash_index_find_bucket(ht, h);
2731 return p ? &p->val : NULL;
2732 }
2733
/* Reset *pos to the first valid position, as computed by
 * _zend_hash_get_valid_pos() starting from slot 0. */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
	IS_CONSISTENT(ht);
	/* Moving the table's own internal pointer requires exclusive ownership. */
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
	*pos = _zend_hash_get_valid_pos(ht, 0);
}
2740
2741
2742 /* This function will be extremely optimized by remembering
2743 * the end of the list
2744 */
zend_hash_internal_pointer_end_ex(HashTable * ht,HashPosition * pos)2745 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2746 {
2747 uint32_t idx;
2748
2749 IS_CONSISTENT(ht);
2750 HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2751
2752 idx = ht->nNumUsed;
2753 if (HT_IS_PACKED(ht)) {
2754 while (idx > 0) {
2755 idx--;
2756 if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2757 *pos = idx;
2758 return;
2759 }
2760 }
2761 } else {
2762 while (idx > 0) {
2763 idx--;
2764 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2765 *pos = idx;
2766 return;
2767 }
2768 }
2769 }
2770 *pos = ht->nNumUsed;
2771 }
2772
2773
zend_hash_move_forward_ex(HashTable * ht,HashPosition * pos)2774 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2775 {
2776 uint32_t idx;
2777
2778 IS_CONSISTENT(ht);
2779 HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2780
2781 idx = _zend_hash_get_valid_pos(ht, *pos);
2782 if (idx < ht->nNumUsed) {
2783 if (HT_IS_PACKED(ht)) {
2784 while (1) {
2785 idx++;
2786 if (idx >= ht->nNumUsed) {
2787 *pos = ht->nNumUsed;
2788 return SUCCESS;
2789 }
2790 if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2791 *pos = idx;
2792 return SUCCESS;
2793 }
2794 }
2795 } else {
2796 while (1) {
2797 idx++;
2798 if (idx >= ht->nNumUsed) {
2799 *pos = ht->nNumUsed;
2800 return SUCCESS;
2801 }
2802 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2803 *pos = idx;
2804 return SUCCESS;
2805 }
2806 }
2807 }
2808 } else {
2809 return FAILURE;
2810 }
2811 }
2812
zend_hash_move_backwards_ex(HashTable * ht,HashPosition * pos)2813 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2814 {
2815 uint32_t idx = *pos;
2816
2817 IS_CONSISTENT(ht);
2818 HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2819
2820 if (idx < ht->nNumUsed) {
2821 if (HT_IS_PACKED(ht)) {
2822 while (idx > 0) {
2823 idx--;
2824 if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2825 *pos = idx;
2826 return SUCCESS;
2827 }
2828 }
2829 } else {
2830 while (idx > 0) {
2831 idx--;
2832 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2833 *pos = idx;
2834 return SUCCESS;
2835 }
2836 }
2837 }
2838 *pos = ht->nNumUsed;
2839 return SUCCESS;
2840 } else {
2841 return FAILURE;
2842 }
2843 }
2844
2845
2846 /* This function should be made binary safe */
zend_hash_get_current_key_ex(const HashTable * ht,zend_string ** str_index,zend_ulong * num_index,const HashPosition * pos)2847 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, const HashPosition *pos)
2848 {
2849 uint32_t idx;
2850 Bucket *p;
2851
2852 IS_CONSISTENT(ht);
2853 idx = _zend_hash_get_valid_pos(ht, *pos);
2854 if (idx < ht->nNumUsed) {
2855 if (HT_IS_PACKED(ht)) {
2856 *num_index = idx;
2857 return HASH_KEY_IS_LONG;
2858 }
2859 p = ht->arData + idx;
2860 if (p->key) {
2861 *str_index = p->key;
2862 return HASH_KEY_IS_STRING;
2863 } else {
2864 *num_index = p->h;
2865 return HASH_KEY_IS_LONG;
2866 }
2867 }
2868 return HASH_KEY_NON_EXISTENT;
2869 }
2870
zend_hash_get_current_key_zval_ex(const HashTable * ht,zval * key,const HashPosition * pos)2871 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, const HashPosition *pos)
2872 {
2873 uint32_t idx;
2874 Bucket *p;
2875
2876 IS_CONSISTENT(ht);
2877 idx = _zend_hash_get_valid_pos(ht, *pos);
2878 if (idx >= ht->nNumUsed) {
2879 ZVAL_NULL(key);
2880 } else {
2881 if (HT_IS_PACKED(ht)) {
2882 ZVAL_LONG(key, idx);
2883 return;
2884 }
2885 p = ht->arData + idx;
2886 if (p->key) {
2887 ZVAL_STR_COPY(key, p->key);
2888 } else {
2889 ZVAL_LONG(key, p->h);
2890 }
2891 }
2892 }
2893
zend_hash_get_current_key_type_ex(HashTable * ht,HashPosition * pos)2894 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2895 {
2896 uint32_t idx;
2897 Bucket *p;
2898
2899 IS_CONSISTENT(ht);
2900 idx = _zend_hash_get_valid_pos(ht, *pos);
2901 if (idx < ht->nNumUsed) {
2902 if (HT_IS_PACKED(ht)) {
2903 return HASH_KEY_IS_LONG;
2904 }
2905 p = ht->arData + idx;
2906 if (p->key) {
2907 return HASH_KEY_IS_STRING;
2908 } else {
2909 return HASH_KEY_IS_LONG;
2910 }
2911 }
2912 return HASH_KEY_NON_EXISTENT;
2913 }
2914
2915
zend_hash_get_current_data_ex(HashTable * ht,HashPosition * pos)2916 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2917 {
2918 uint32_t idx;
2919 Bucket *p;
2920
2921 IS_CONSISTENT(ht);
2922 idx = _zend_hash_get_valid_pos(ht, *pos);
2923 if (idx < ht->nNumUsed) {
2924 if (HT_IS_PACKED(ht)) {
2925 return &ht->arPacked[idx];
2926 }
2927 p = ht->arData + idx;
2928 return &p->val;
2929 } else {
2930 return NULL;
2931 }
2932 }
2933
zend_hash_bucket_swap(Bucket * p,Bucket * q)2934 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2935 {
2936 zval val;
2937 zend_ulong h;
2938 zend_string *key;
2939
2940 val = p->val;
2941 h = p->h;
2942 key = p->key;
2943
2944 p->val = q->val;
2945 p->h = q->h;
2946 p->key = q->key;
2947
2948 q->val = val;
2949 q->h = h;
2950 q->key = key;
2951 }
2952
zend_hash_bucket_renum_swap(Bucket * p,Bucket * q)2953 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2954 {
2955 zval val;
2956
2957 val = p->val;
2958 p->val = q->val;
2959 q->val = val;
2960 }
2961
zend_hash_bucket_packed_swap(Bucket * p,Bucket * q)2962 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2963 {
2964 zval val;
2965 zend_ulong h;
2966
2967 val = p->val;
2968 h = p->h;
2969
2970 p->val = q->val;
2971 p->h = q->h;
2972
2973 q->val = val;
2974 q->h = h;
2975 }
2976
/* Sort the table in place with the given sort driver and bucket comparator.
 * When renumber is set, keys are discarded and replaced by sequential
 * integers afterwards.  The packed layout cannot be sorted directly, so the
 * table is converted to the hashed layout first and converted back (or
 * rehashed) at the end. */
ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
{
	Bucket *p;
	uint32_t i, j;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
		/* Doesn't require sorting */
		return;
	}

	if (HT_IS_PACKED(ht)) {
		/* Sorting operates on buckets; leave the packed layout first. */
		zend_hash_packed_to_hash(ht); // TODO: ???
	}

	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Store original order of elements in extra space to allow stable sorting. */
		for (i = 0; i < ht->nNumUsed; i++) {
			Z_EXTRA(ht->arData[i].val) = i;
		}
	} else {
		/* Remove holes and store original order. */
		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
			p = ht->arData + j;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			if (i != j) {
				ht->arData[i] = *p;
			}
			Z_EXTRA(ht->arData[i].val) = i;
			i++;
		}
		ht->nNumUsed = i;
	}

	if (!HT_IS_PACKED(ht)) {
		/* We broke the hash collisions chains overriding Z_NEXT() by Z_EXTRA().
		 * Reset the hash headers table as well to avoid possible inconsistent
		 * access on recursive data structures.
		 *
		 * See Zend/tests/bug63882_2.phpt
		 */
		HT_HASH_RESET(ht);
	}

	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
				(HT_IS_PACKED(ht) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));

	ht->nInternalPointer = 0;

	if (renumber) {
		/* Discard all keys and assign sequential integer keys. */
		for (j = 0; j < i; j++) {
			p = ht->arData + j;
			p->h = j;
			if (p->key) {
				zend_string_release(p->key);
				p->key = NULL;
			}
		}

		ht->nNextFreeElement = i;
	}
	if (HT_IS_PACKED(ht)) {
		if (!renumber) {
			zend_hash_packed_to_hash(ht);
		}
	} else {
		if (renumber) {
			/* Sequential integer keys: convert back to the packed layout. */
			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
			Bucket *old_buckets = ht->arData;
			zval *zv;

			new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
			ht->nTableMask = HT_MIN_MASK;
			HT_SET_DATA_ADDR(ht, new_data);
			p = old_buckets;
			zv = ht->arPacked;
			for (i = 0; i < ht->nTableSize; i++) {
				ZVAL_COPY_VALUE(zv, &p->val);
				zv++;
				p++;
			}
			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
			HT_HASH_RESET_PACKED(ht);
		} else {
			/* Bucket order changed: rebuild the hash chains. */
			zend_hash_rehash(ht);
		}
	}
}
3069
zend_hash_compare_impl(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3070 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) {
3071 uint32_t idx1, idx2;
3072 zend_string *key1, *key2;
3073 zend_ulong h1, h2;
3074 zval *pData1, *pData2;;
3075 int result;
3076
3077 if (ht1->nNumOfElements != ht2->nNumOfElements) {
3078 return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
3079 }
3080
3081 for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
3082 if (HT_IS_PACKED(ht1)) {
3083 pData1 = ht1->arPacked + idx1;
3084 h1 = idx1;
3085 key1 = NULL;
3086 } else {
3087 Bucket *p = ht1->arData + idx1;
3088 pData1 = &p->val;
3089 h1 = p->h;
3090 key1 = p->key;
3091 }
3092
3093 if (Z_TYPE_P(pData1) == IS_UNDEF) continue;
3094 if (ordered) {
3095 if (HT_IS_PACKED(ht2)) {
3096 while (1) {
3097 ZEND_ASSERT(idx2 != ht2->nNumUsed);
3098 pData2 = ht2->arPacked + idx2;
3099 h2 = idx2;
3100 key2 = NULL;
3101 if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3102 idx2++;
3103 }
3104 } else {
3105 while (1) {
3106 Bucket *p;
3107 ZEND_ASSERT(idx2 != ht2->nNumUsed);
3108 p = ht2->arData + idx2;
3109 pData2 = &p->val;
3110 h2 = p->h;
3111 key2 = p->key;
3112 if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3113 idx2++;
3114 }
3115 }
3116 if (key1 == NULL && key2 == NULL) { /* numeric indices */
3117 if (h1 != h2) {
3118 return h1 > h2 ? 1 : -1;
3119 }
3120 } else if (key1 != NULL && key2 != NULL) { /* string indices */
3121 if (ZSTR_LEN(key1) != ZSTR_LEN(key2)) {
3122 return ZSTR_LEN(key1) > ZSTR_LEN(key2) ? 1 : -1;
3123 }
3124
3125 result = memcmp(ZSTR_VAL(key1), ZSTR_VAL(key2), ZSTR_LEN(key1));
3126 if (result != 0) {
3127 return result;
3128 }
3129 } else {
3130 /* Mixed key types: A string key is considered as larger */
3131 return key1 != NULL ? 1 : -1;
3132 }
3133 idx2++;
3134 } else {
3135 if (key1 == NULL) { /* numeric index */
3136 pData2 = zend_hash_index_find(ht2, h1);
3137 if (pData2 == NULL) {
3138 return 1;
3139 }
3140 } else { /* string index */
3141 pData2 = zend_hash_find(ht2, key1);
3142 if (pData2 == NULL) {
3143 return 1;
3144 }
3145 }
3146 }
3147
3148 if (Z_TYPE_P(pData1) == IS_INDIRECT) {
3149 pData1 = Z_INDIRECT_P(pData1);
3150 }
3151 if (Z_TYPE_P(pData2) == IS_INDIRECT) {
3152 pData2 = Z_INDIRECT_P(pData2);
3153 }
3154
3155 if (Z_TYPE_P(pData1) == IS_UNDEF) {
3156 if (Z_TYPE_P(pData2) != IS_UNDEF) {
3157 return -1;
3158 }
3159 } else if (Z_TYPE_P(pData2) == IS_UNDEF) {
3160 return 1;
3161 } else {
3162 result = compar(pData1, pData2);
3163 if (result != 0) {
3164 return result;
3165 }
3166 }
3167 }
3168
3169 return 0;
3170 }
3171
zend_hash_compare(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3172 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
3173 {
3174 int result;
3175 IS_CONSISTENT(ht1);
3176 IS_CONSISTENT(ht2);
3177
3178 if (ht1 == ht2) {
3179 return 0;
3180 }
3181
3182 /* It's enough to protect only one of the arrays.
3183 * The second one may be referenced from the first and this may cause
3184 * false recursion detection.
3185 */
3186 if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
3187 zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
3188 }
3189
3190 GC_TRY_PROTECT_RECURSION(ht1);
3191 result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
3192 GC_TRY_UNPROTECT_RECURSION(ht1);
3193
3194 return result;
3195 }
3196
3197
zend_hash_minmax(const HashTable * ht,compare_func_t compar,uint32_t flag)3198 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
3199 {
3200 uint32_t idx;
3201 zval *res;
3202
3203 IS_CONSISTENT(ht);
3204
3205 if (ht->nNumOfElements == 0 ) {
3206 return NULL;
3207 }
3208
3209 if (HT_IS_PACKED(ht)) {
3210 zval *zv;
3211
3212 idx = 0;
3213 while (1) {
3214 if (idx == ht->nNumUsed) {
3215 return NULL;
3216 }
3217 if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) break;
3218 idx++;
3219 }
3220 res = ht->arPacked + idx;
3221 for (; idx < ht->nNumUsed; idx++) {
3222 zv = ht->arPacked + idx;
3223 if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
3224
3225 if (flag) {
3226 if (compar(res, zv) < 0) { /* max */
3227 res = zv;
3228 }
3229 } else {
3230 if (compar(res, zv) > 0) { /* min */
3231 res = zv;
3232 }
3233 }
3234 }
3235 } else {
3236 Bucket *p;
3237
3238 idx = 0;
3239 while (1) {
3240 if (idx == ht->nNumUsed) {
3241 return NULL;
3242 }
3243 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
3244 idx++;
3245 }
3246 res = &ht->arData[idx].val;
3247 for (; idx < ht->nNumUsed; idx++) {
3248 p = ht->arData + idx;
3249 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
3250
3251 if (flag) {
3252 if (compar(res, &p->val) < 0) { /* max */
3253 res = &p->val;
3254 }
3255 } else {
3256 if (compar(res, &p->val) > 0) { /* min */
3257 res = &p->val;
3258 }
3259 }
3260 }
3261 }
3262 return res;
3263 }
3264
_zend_handle_numeric_str_ex(const char * key,size_t length,zend_ulong * idx)3265 ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
3266 {
3267 const char *tmp = key;
3268
3269 const char *end = key + length;
3270
3271 if (*tmp == '-') {
3272 tmp++;
3273 }
3274
3275 if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
3276 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
3277 || (SIZEOF_ZEND_LONG == 4 &&
3278 end - tmp == MAX_LENGTH_OF_LONG - 1 &&
3279 *tmp > '2')) { /* overflow */
3280 return 0;
3281 }
3282 *idx = (*tmp - '0');
3283 while (1) {
3284 ++tmp;
3285 if (tmp == end) {
3286 if (*key == '-') {
3287 if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
3288 return 0;
3289 }
3290 *idx = 0 - *idx;
3291 } else if (*idx > ZEND_LONG_MAX) { /* overflow */
3292 return 0;
3293 }
3294 return 1;
3295 }
3296 if (*tmp <= '9' && *tmp >= '0') {
3297 *idx = (*idx * 10) + (*tmp - '0');
3298 } else {
3299 return 0;
3300 }
3301 }
3302 }
3303
3304 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
3305 * and converts it to a "proptable" (contains only string keys).
3306 * If the symtable didn't need duplicating, its refcount is incremented.
3307 */
zend_symtable_to_proptable(HashTable * ht)3308 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
3309 {
3310 zend_ulong num_key;
3311 zend_string *str_key;
3312 zval *zv;
3313
3314 if (UNEXPECTED(HT_IS_PACKED(ht))) {
3315 goto convert;
3316 }
3317
3318 ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3319 if (!str_key) {
3320 goto convert;
3321 }
3322 } ZEND_HASH_FOREACH_END();
3323
3324 if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
3325 GC_ADDREF(ht);
3326 }
3327
3328 return ht;
3329
3330 convert:
3331 {
3332 HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3333
3334 ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
3335 if (!str_key) {
3336 str_key = zend_long_to_str(num_key);
3337 zend_string_delref(str_key);
3338 }
3339 do {
3340 if (Z_OPT_REFCOUNTED_P(zv)) {
3341 if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3342 zv = Z_REFVAL_P(zv);
3343 if (!Z_OPT_REFCOUNTED_P(zv)) {
3344 break;
3345 }
3346 }
3347 Z_ADDREF_P(zv);
3348 }
3349 } while (0);
3350 zend_hash_update(new_ht, str_key, zv);
3351 } ZEND_HASH_FOREACH_END();
3352
3353 return new_ht;
3354 }
3355 }
3356
3357 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
3358 * a "symtable" (contains integer and non-numeric string keys).
3359 * If the proptable didn't need duplicating, its refcount is incremented.
3360 */
zend_proptable_to_symtable(HashTable * ht,bool always_duplicate)3361 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
3362 {
3363 zend_ulong num_key;
3364 zend_string *str_key;
3365 zval *zv;
3366
3367 if (!HT_IS_PACKED(ht)) {
3368 ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3369 /* The `str_key &&` here might seem redundant: property tables should
3370 * only have string keys. Unfortunately, this isn't true, at the very
3371 * least because of ArrayObject, which stores a symtable where the
3372 * property table should be.
3373 */
3374 if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3375 goto convert;
3376 }
3377 } ZEND_HASH_FOREACH_END();
3378 }
3379
3380 if (always_duplicate) {
3381 return zend_array_dup(ht);
3382 }
3383
3384 if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
3385 GC_ADDREF(ht);
3386 }
3387
3388 return ht;
3389
3390 convert:
3391 {
3392 HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3393
3394 ZEND_HASH_MAP_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
3395 do {
3396 if (Z_OPT_REFCOUNTED_P(zv)) {
3397 if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3398 zv = Z_REFVAL_P(zv);
3399 if (!Z_OPT_REFCOUNTED_P(zv)) {
3400 break;
3401 }
3402 }
3403 Z_ADDREF_P(zv);
3404 }
3405 } while (0);
3406 /* Again, thank ArrayObject for `!str_key ||`. */
3407 if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3408 zend_hash_index_update(new_ht, num_key, zv);
3409 } else {
3410 zend_hash_update(new_ht, str_key, zv);
3411 }
3412 } ZEND_HASH_FOREACH_END();
3413
3414 return new_ht;
3415 }
3416 }
3417