1 /*
2 +----------------------------------------------------------------------+
3 | Zend Engine |
4 +----------------------------------------------------------------------+
5 | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 2.00 of the Zend license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.zend.com/license/2_00.txt. |
11 | If you did not receive a copy of the Zend license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@zend.com so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
15 | Authors: Andi Gutmans <andi@php.net> |
16 | Zeev Suraski <zeev@php.net> |
17 | Dmitry Stogov <dmitry@php.net> |
18 +----------------------------------------------------------------------+
19 */
20
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24
25 #if defined(__aarch64__) || defined(_M_ARM64)
26 # include <arm_neon.h>
27 #endif
28
29 /* Prefer to use AVX2 instructions for better latency and throughput */
30 #if defined(__AVX2__)
31 # include <immintrin.h>
32 #elif defined( __SSE2__)
33 # include <mmintrin.h>
34 # include <emmintrin.h>
35 #endif
36
37 #if ZEND_DEBUG
38 # define HT_ASSERT(ht, expr) \
39 ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
40 #else
41 # define HT_ASSERT(ht, expr)
42 #endif
43
44 #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
45
46 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
47
48 #if ZEND_DEBUG
49
50 #define HT_OK 0x00
51 #define HT_IS_DESTROYING 0x01
52 #define HT_DESTROYED 0x02
53 #define HT_CLEANING 0x03
54
_zend_is_inconsistent(const HashTable * ht,const char * file,int line)55 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
56 {
57 if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
58 return;
59 }
60 switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
61 case HT_IS_DESTROYING:
62 zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
63 break;
64 case HT_DESTROYED:
65 zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
66 break;
67 case HT_CLEANING:
68 zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
69 break;
70 default:
71 zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
72 break;
73 }
74 ZEND_UNREACHABLE();
75 }
76 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
77 #define SET_INCONSISTENT(n) do { \
78 HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
79 } while (0)
80 #else
81 #define IS_CONSISTENT(a)
82 #define SET_INCONSISTENT(n)
83 #endif
84
85 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
86 if ((ht)->nNumUsed >= (ht)->nTableSize) { \
87 zend_hash_do_resize(ht); \
88 }
89
zend_hash_str_find_ptr_lc(const HashTable * ht,const char * str,size_t len)90 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
91 void *result;
92 char *lc_str;
93
94 /* Stack allocate small strings to improve performance */
95 ALLOCA_FLAG(use_heap)
96
97 lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
98 result = zend_hash_str_find_ptr(ht, lc_str, len);
99 free_alloca(lc_str, use_heap);
100
101 return result;
102 }
103
zend_hash_find_ptr_lc(const HashTable * ht,zend_string * key)104 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
105 void *result;
106 zend_string *lc_key = zend_string_tolower(key);
107 result = zend_hash_find_ptr(ht, lc_key);
108 zend_string_release(lc_key);
109 return result;
110 }
111
112 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
113
/* Round nSize up to a power of two clamped to [HT_MIN_SIZE, HT_MAX_SIZE].
 * Does not return on overflow: raises E_ERROR instead. */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#ifdef ZEND_WIN32
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize > HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#ifdef ZEND_WIN32
	if (BitScanReverse(&index, nSize - 1)) {
		/* 2 << (position of highest set bit of nSize-1) == next pow2 >= nSize */
		return 0x2u << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bit scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
	/* 2 << (clz(nSize-1) ^ 31) == next power of two >= nSize */
	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: smear the top set bit downward, then add one to
	   reach the next power of two. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
148
/* Allocate the data area for a packed (vector-like) table and flag it as
 * packed with static keys.  The table is expected to be uninitialized. */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
	void *data;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
		/* Use specialized API with constant allocation amount for a particularly common case. */
		data = emalloc(HT_PACKED_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
	} else {
		data = emalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
	}
	HT_SET_DATA_ADDR(ht, data);
	/* Don't overwrite iterator count. */
	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET_PACKED(ht);
}
166
/* Allocate the data area for a mixed (real hash) table and reset every hash
 * slot to -1 (HT_INVALID_IDX).  The common HT_MIN_SIZE case is special-cased:
 * it has exactly 16 uint32_t hash slots, filled with all-ones using the
 * widest store the target supports (AVX2 / SSE2 / NEON / scalar). */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
	void *data;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
		HT_SET_DATA_ADDR(ht, data);
		/* Don't overwrite iterator count. */
		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
#if defined(__AVX2__)
		do {
			/* Two 256-bit stores cover all 16 hash slots. */
			__m256i ymm0 = _mm256_setzero_si256();
			ymm0 = _mm256_cmpeq_epi64(ymm0, ymm0); /* all bits set */
			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data, 0), ymm0);
			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data, 8), ymm0);
		} while(0);
#elif defined (__SSE2__)
		do {
			/* Four 128-bit stores cover all 16 hash slots. */
			__m128i xmm0 = _mm_setzero_si128();
			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0); /* all bits set */
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 0), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 4), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 8), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
		} while (0);
#elif defined(__aarch64__) || defined(_M_ARM64)
		do {
			/* Four 128-bit NEON stores cover all 16 hash slots. */
			int32x4_t t = vdupq_n_s32(-1);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 0), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 4), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 8), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
		} while (0);
#else
		HT_HASH_EX(data, 0) = -1;
		HT_HASH_EX(data, 1) = -1;
		HT_HASH_EX(data, 2) = -1;
		HT_HASH_EX(data, 3) = -1;
		HT_HASH_EX(data, 4) = -1;
		HT_HASH_EX(data, 5) = -1;
		HT_HASH_EX(data, 6) = -1;
		HT_HASH_EX(data, 7) = -1;
		HT_HASH_EX(data, 8) = -1;
		HT_HASH_EX(data, 9) = -1;
		HT_HASH_EX(data, 10) = -1;
		HT_HASH_EX(data, 11) = -1;
		HT_HASH_EX(data, 12) = -1;
		HT_HASH_EX(data, 13) = -1;
		HT_HASH_EX(data, 14) = -1;
		HT_HASH_EX(data, 15) = -1;
#endif
		return;
	} else {
		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
	}
	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
	HT_SET_DATA_ADDR(ht, data);
	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET(ht);
}
233
zend_hash_real_init_ex(HashTable * ht,bool packed)234 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
235 {
236 HT_ASSERT_RC1(ht);
237 ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
238 if (packed) {
239 zend_hash_real_init_packed_ex(ht);
240 } else {
241 zend_hash_real_init_mixed_ex(ht);
242 }
243 }
244
/* Shared read-only hash area used by all uninitialized tables: the minimal
 * number of slots (-HT_MIN_MASK of them), each holding HT_INVALID_IDX. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};
247
/* Process-wide immutable empty array.  Refcount starts at 2 and the object
 * is flagged GC_IMMUTABLE, so it is never freed or mutated in place; its
 * data pointer references the shared uninitialized_bucket area. */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_UNINITIALIZED,
	.nTableMask = HT_MIN_MASK,
	{.arData = (Bucket*)&uninitialized_bucket[2]},
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = ZEND_LONG_MIN,
	.pDestructor = ZVAL_PTR_DTOR
};
261
/* Shared initializer for all table constructors.  Storage allocation is
 * lazy: the data pointer aims at the shared read-only uninitialized_bucket
 * and HASH_FLAG_UNINITIALIZED defers real allocation to first insertion. */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->pDestructor = pDestructor;
	/* Rounded up to a valid power of two; applied when storage is allocated. */
	ht->nTableSize = zend_hash_check_size(nSize);
}
276
/* Public out-of-line entry point; delegates to the inlined initializer. */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
281
_zend_new_array_0(void)282 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
283 {
284 HashTable *ht = emalloc(sizeof(HashTable));
285 _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
286 return ht;
287 }
288
_zend_new_array(uint32_t nSize)289 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
290 {
291 HashTable *ht = emalloc(sizeof(HashTable));
292 _zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
293 return ht;
294 }
295
zend_new_pair(zval * val1,zval * val2)296 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
297 {
298 zval *zv;
299 HashTable *ht = emalloc(sizeof(HashTable));
300 _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
301 ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
302 zend_hash_real_init_packed_ex(ht);
303
304 zv = ht->arPacked;
305 ZVAL_COPY_VALUE(zv, val1);
306 zv++;
307 ZVAL_COPY_VALUE(zv, val2);
308 return ht;
309 }
310
/* Double a packed table's capacity in place.  Raises E_ERROR (no return)
 * when the table is already at HT_MAX_SIZE. */
ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
	HT_ASSERT_RC1(ht);
	if (ht->nTableSize >= HT_MAX_SIZE) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
	}
	uint32_t newTableSize = ht->nTableSize * 2;
	/* perealloc2 copies only the currently used prefix of the data area. */
	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
	ht->nTableSize = newTableSize;
}
321
/* Public wrapper: allocate real storage in packed or mixed layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
329
/* Public wrapper: allocate real storage in packed layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
337
/* Public wrapper: allocate real storage in mixed (hash) layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
345
/* Convert a packed table into a real hash table: allocate mixed-layout
 * storage, turn every packed zval into a Bucket keyed by its position, then
 * rebuild the hash slots with zend_hash_rehash(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	zval *src = ht->arPacked;
	Bucket *dst;
	uint32_t i;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	HT_ASSERT_RC1(ht);
	// Alloc before assign to avoid inconsistencies on OOM
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	dst = ht->arData;
	for (i = 0; i < ht->nNumUsed; i++) {
		/* Packed slot i becomes a bucket with integer key i, no string key. */
		ZVAL_COPY_VALUE(&dst->val, src);
		dst->h = i;
		dst->key = NULL;
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	zend_hash_rehash(ht);
}
373
/* Convert a mixed table back to the packed representation: copy bucket
 * values into a plain zval vector and reset the hash area to the minimal
 * packed mask.  Keys and hashes are discarded; the caller is expected to
 * have verified the table is packable (dense integer keys 0..n-1) —
 * NOTE(review): not re-checked here. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *src = ht->arData;
	zval *dst;
	uint32_t i;

	HT_ASSERT_RC1(ht);
	/* Allocate before mutating the table so an OOM leaves it consistent. */
	new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, new_data);
	HT_HASH_RESET_PACKED(ht);
	dst = ht->arPacked;
	for (i = 0; i < ht->nNumUsed; i++) {
		ZVAL_COPY_VALUE(dst, &src->val);
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
395
/* Pre-size ht to hold at least nSize elements in the requested layout.
 * Uninitialized tables get their first real allocation; initialized ones
 * are grown (packed: plain realloc; mixed: fresh allocation + rehash).
 * nSize == 0 is a no-op. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
{
	HT_ASSERT_RC1(ht);

	if (nSize == 0) return;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			ZEND_ASSERT(HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Packed data is position-keyed, so reallocation suffices. */
				uint32_t newTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
				ht->nTableSize = newTableSize;
			}
		} else {
			ZEND_ASSERT(!HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* A bigger mask means a new data area: copy the buckets
				 * and rebuild the hash slots. */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableSize = nSize;
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
434
/* Roll the table back to its first nNumUsed buckets, walking the discarded
 * tail backwards and unlinking each live bucket from its collision chain.
 * Correct because collision links always point from higher to lower bucket
 * indexes, so a discarded bucket is always the head of its chain. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	while (p != end) {
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
#if 0
		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
			abort();
		}
#endif
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
459
/* Recount elements, excluding INDIRECT slots whose target is UNDEF (these
 * occur in symbol tables and tables flagged HASH_FLAG_HAS_EMPTY_IND). */
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
	zval *val;
	uint32_t num = ht->nNumOfElements;

	ZEND_HASH_MAP_FOREACH_VAL(ht, val) {
		if (Z_TYPE_P(val) == IS_INDIRECT) {
			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
				num--;
			}
		}
	} ZEND_HASH_FOREACH_END();
	return num;
}
474 /* }}} */
475
zend_array_count(HashTable * ht)476 ZEND_API uint32_t zend_array_count(HashTable *ht)
477 {
478 uint32_t num;
479 if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
480 num = zend_array_recalc_elements(ht);
481 if (UNEXPECTED(ht->nNumOfElements == num)) {
482 HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
483 }
484 } else if (UNEXPECTED(ht == &EG(symbol_table))) {
485 num = zend_array_recalc_elements(ht);
486 } else {
487 num = zend_hash_num_elements(ht);
488 }
489 return num;
490 }
491 /* }}} */
492
_zend_hash_get_valid_pos(const HashTable * ht,HashPosition pos)493 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
494 {
495 if (HT_IS_PACKED(ht)) {
496 while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arPacked[pos])) {
497 pos++;
498 }
499 } else {
500 while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
501 pos++;
502 }
503 }
504 return pos;
505 }
506
/* Internal pointer, advanced past any UNDEF slots. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}
511
/* Public accessor for the table's current (valid) internal position. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}
516
/* Delete every iterator on the circular next_copy ring of idx except idx
 * itself, then relink idx as a singleton ring. */
static void zend_hash_remove_iterator_copies(uint32_t idx) {
	HashTableIterator *iterators = EG(ht_iterators);

	HashTableIterator *iter = iterators + idx;
	uint32_t next_idx = iter->next_copy;
	while (next_idx != idx) {
		uint32_t cur_idx = next_idx;
		HashTableIterator *cur_iter = iterators + cur_idx;
		next_idx = cur_iter->next_copy;
		cur_iter->next_copy = cur_idx; // avoid recursion in zend_hash_iterator_del
		zend_hash_iterator_del(cur_idx);
	}
	iter->next_copy = idx;
}
531
/* Register an iterator over ht at position pos and return its slot index in
 * EG(ht_iterators).  A free slot is reused when available; otherwise the
 * array grows by 8 slots, migrating off the static EG(ht_iterators_slots)
 * buffer on first growth. */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* Once the per-table count saturates it is never incremented again. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	/* Reuse the first free slot (ht == NULL). */
	while (iter != end) {
		if (iter->ht == NULL) {
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			iter->next_copy = idx;
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* No free slot: grow the iterator array by 8 entries. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		/* Still on the static buffer; move it to the heap first. */
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	/* Zero the remaining 7 fresh slots so they read as free. */
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
	idx = iter - EG(ht_iterators);
	iter->next_copy = idx;
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
570
571 // To avoid losing track of the HashTable when separating arrays, we track all copies at once.
/* Walk the next_copy ring of iterator idx looking for a copy whose table is
 * the one actually being iterated (copies appear when arrays are separated).
 * On success the original iterator is retargeted to that copy's table and
 * position (fixing both tables' iterator counts), all copies are dropped,
 * and true is returned.  On failure the copies are dropped and false is
 * returned so the caller can re-attach the iterator itself. */
static zend_always_inline bool zend_hash_iterator_find_copy_pos(uint32_t idx, HashTable *ht) {
	HashTableIterator *iter = EG(ht_iterators) + idx;

	uint32_t next_idx = iter->next_copy;
	if (EXPECTED(next_idx != idx)) {
		HashTableIterator *copy_iter;
		while (next_idx != idx) {
			copy_iter = EG(ht_iterators) + next_idx;
			if (copy_iter->ht == ht) {
				// We have found the hashtable we are actually iterating over
				// Now clean any intermittent copies and replace the original index by the found one
				if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
						&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
					HT_DEC_ITERATORS_COUNT(iter->ht);
				}
				if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
					HT_INC_ITERATORS_COUNT(ht);
				}
				iter->ht = copy_iter->ht;
				iter->pos = copy_iter->pos;
				zend_hash_remove_iterator_copies(idx);
				return true;
			}
			next_idx = copy_iter->next_copy;
		}
		zend_hash_remove_iterator_copies(idx);
	}

	return false;
}
602
/* Return the position of iterator idx over ht.  When the iterator currently
 * points at a different table (e.g. the array was replaced), try to follow
 * its copy ring; failing that, re-attach it to ht at the table's current
 * position, keeping both tables' iterator counts balanced. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
621
zend_hash_iterator_pos_ex(uint32_t idx,zval * array)622 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
623 {
624 HashTable *ht = Z_ARRVAL_P(array);
625 HashTableIterator *iter = EG(ht_iterators) + idx;
626
627 ZEND_ASSERT(idx != (uint32_t)-1);
628 if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
629 if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
630 && EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
631 HT_DEC_ITERATORS_COUNT(iter->ht);
632 }
633 SEPARATE_ARRAY(array);
634 ht = Z_ARRVAL_P(array);
635 if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
636 HT_INC_ITERATORS_COUNT(ht);
637 }
638 iter->ht = ht;
639 iter->pos = _zend_hash_get_current_pos(ht);
640 }
641 return iter->pos;
642 }
643
/* Release iterator idx: decrement its table's iterator count (unless the
 * table is gone, poisoned, or its count saturated), free the slot, drop any
 * linked copies, and trim EG(ht_iterators_used) when tail slots are free. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);

	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;

	if (UNEXPECTED(iter->next_copy != idx)) {
		zend_hash_remove_iterator_copies(idx);
	}

	/* Shrink the used range while the highest slots are free. */
	if (idx == EG(ht_iterators_used) - 1) {
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
668
_zend_hash_iterators_remove(HashTable * ht)669 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
670 {
671 HashTableIterator *iter = EG(ht_iterators);
672 HashTableIterator *end = iter + EG(ht_iterators_used);
673
674 while (iter != end) {
675 if (iter->ht == ht) {
676 iter->ht = HT_POISONED_PTR;
677 }
678 iter++;
679 }
680 }
681
/* Fast-path wrapper: only scan the iterator array when ht has iterators. */
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		_zend_hash_iterators_remove(ht);
	}
}
688
zend_hash_iterators_lower_pos(HashTable * ht,HashPosition start)689 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
690 {
691 HashTableIterator *iter = EG(ht_iterators);
692 HashTableIterator *end = iter + EG(ht_iterators_used);
693 HashPosition res = ht->nNumUsed;
694
695 while (iter != end) {
696 if (iter->ht == ht) {
697 if (iter->pos >= start && iter->pos < res) {
698 res = iter->pos;
699 }
700 }
701 iter++;
702 }
703 return res;
704 }
705
_zend_hash_iterators_update(HashTable * ht,HashPosition from,HashPosition to)706 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
707 {
708 HashTableIterator *iter = EG(ht_iterators);
709 HashTableIterator *end = iter + EG(ht_iterators_used);
710
711 while (iter != end) {
712 if (iter->ht == ht && iter->pos == from) {
713 iter->pos = to;
714 }
715 iter++;
716 }
717 }
718
zend_hash_iterators_advance(HashTable * ht,HashPosition step)719 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
720 {
721 HashTableIterator *iter = EG(ht_iterators);
722 HashTableIterator *end = iter + EG(ht_iterators_used);
723
724 while (iter != end) {
725 if (iter->ht == ht) {
726 iter->pos += step;
727 }
728 iter++;
729 }
730 }
731
732 /* Hash must be known and precomputed before */
/* Look up a zend_string key in a non-packed table; ZSTR_H(key) must already
 * be computed.  Interned keys are matched by pointer identity first, falling
 * back to hash + content comparison along the collision chain.  Returns the
 * bucket or NULL. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, const zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	ZEND_ASSERT(ZSTR_H(key) != 0 && "Hash must be known");

	arData = ht->arData;
	nIndex = ZSTR_H(key) | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);

	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}

	/* Walk the collision chain. */
	while (1) {
		if (p->h == ZSTR_H(key) &&
			EXPECTED(p->key) &&
			zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
769
zend_hash_str_find_bucket(const HashTable * ht,const char * str,size_t len,zend_ulong h)770 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
771 {
772 uint32_t nIndex;
773 uint32_t idx;
774 Bucket *p, *arData;
775
776 arData = ht->arData;
777 nIndex = h | ht->nTableMask;
778 idx = HT_HASH_EX(arData, nIndex);
779 while (idx != HT_INVALID_IDX) {
780 ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
781 p = HT_HASH_TO_BUCKET_EX(arData, idx);
782 if ((p->h == h)
783 && p->key
784 && zend_string_equals_cstr(p->key, str, len)) {
785 return p;
786 }
787 idx = Z_NEXT(p->val);
788 }
789 return NULL;
790 }
791
zend_hash_index_find_bucket(const HashTable * ht,zend_ulong h)792 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
793 {
794 uint32_t nIndex;
795 uint32_t idx;
796 Bucket *p, *arData;
797
798 arData = ht->arData;
799 nIndex = h | ht->nTableMask;
800 idx = HT_HASH_EX(arData, nIndex);
801 while (idx != HT_INVALID_IDX) {
802 ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
803 p = HT_HASH_TO_BUCKET_EX(arData, idx);
804 if (p->h == h && !p->key) {
805 return p;
806 }
807 idx = Z_NEXT(p->val);
808 }
809 return NULL;
810 }
811
/* Common implementation behind zend_hash_add/update/add_new/lookup for
 * zend_string keys.  `flag` combines:
 *  - HASH_ADD:             return NULL if the key already holds a live value
 *  - HASH_UPDATE:          destroy and replace an existing value
 *  - HASH_UPDATE_INDIRECT: follow IS_INDIRECT slots before deciding
 *  - HASH_ADD_NEW:         caller guarantees the key is absent; skip lookup
 *  - HASH_LOOKUP:          return the existing slot, or insert NULL
 * Returns the slot holding the (new) value, or NULL per the rules above. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_string_hash_val(key);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* First insertion: allocate storage; the key is surely new. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces a packed array into the hash layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
		/* In debug builds the lookup also runs for HASH_ADD_NEW so the
		 * "key must be new" contract can be asserted below. */
		p = zend_hash_find_bucket(ht, key);

		if (p) {
			zval *data;

			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					/* An UNDEF indirect target counts as absent: fill it. */
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	/* Prepend the new bucket to its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
892
/* Common implementation behind the zend_hash_str_* insert/update family.
 * Same flag semantics as _zend_hash_add_or_update_i(), but takes a raw
 * (str, len) pair plus a precomputed hash h; on insertion a fresh
 * zend_string key is created (persistent when the table is). */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* First insertion: allocate storage; the key is surely new. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces a packed array into the hash layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);

		if (p) {
			zval *data;

			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					/* An UNDEF indirect target counts as absent: fill it. */
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	/* Materialize the key; reuse the caller-supplied hash below. */
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
#if ZEND_RC_DEBUG
	if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) {
		GC_MAKE_PERSISTENT_LOCAL(key);
	}
#endif
	p->h = ZSTR_H(key) = h;
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	/* Prepend the new bucket to its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);

	return &p->val;
}
972
zend_hash_add_or_update(HashTable * ht,zend_string * key,zval * pData,uint32_t flag)973 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
974 {
975 if (flag == HASH_ADD) {
976 return zend_hash_add(ht, key, pData);
977 } else if (flag == HASH_ADD_NEW) {
978 return zend_hash_add_new(ht, key, pData);
979 } else if (flag == HASH_UPDATE) {
980 return zend_hash_update(ht, key, pData);
981 } else {
982 ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
983 return zend_hash_update_ind(ht, key, pData);
984 }
985 }
986
/* Insert `pData` under `key`; returns NULL if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}
991
/* Insert `pData` under `key`, overwriting (and destructing) any existing value. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}
996
/* Like zend_hash_update(), but if the existing value is IS_INDIRECT the
 * indirect target is updated instead of the bucket value itself. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
1001
/* Insert `pData` under `key` when the caller guarantees the key is not
 * already present, allowing the duplicate lookup to be skipped. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
1006
/* Return the slot for `key`, creating it with a NULL value when missing. */
ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
{
	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
}
1011
zend_hash_str_add_or_update(HashTable * ht,const char * str,size_t len,zval * pData,uint32_t flag)1012 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
1013 {
1014 if (flag == HASH_ADD) {
1015 return zend_hash_str_add(ht, str, len, pData);
1016 } else if (flag == HASH_ADD_NEW) {
1017 return zend_hash_str_add_new(ht, str, len, pData);
1018 } else if (flag == HASH_UPDATE) {
1019 return zend_hash_str_update(ht, str, len, pData);
1020 } else {
1021 ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
1022 return zend_hash_str_update_ind(ht, str, len, pData);
1023 }
1024 }
1025
zend_hash_str_update(HashTable * ht,const char * str,size_t len,zval * pData)1026 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
1027 {
1028 zend_ulong h = zend_hash_func(str, len);
1029
1030 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
1031 }
1032
zend_hash_str_update_ind(HashTable * ht,const char * str,size_t len,zval * pData)1033 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
1034 {
1035 zend_ulong h = zend_hash_func(str, len);
1036
1037 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
1038 }
1039
zend_hash_str_add(HashTable * ht,const char * str,size_t len,zval * pData)1040 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
1041 {
1042 zend_ulong h = zend_hash_func(str, len);
1043
1044 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
1045 }
1046
zend_hash_str_add_new(HashTable * ht,const char * str,size_t len,zval * pData)1047 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
1048 {
1049 zend_ulong h = zend_hash_func(str, len);
1050
1051 return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
1052 }
1053
zend_hash_index_add_empty_element(HashTable * ht,zend_ulong h)1054 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
1055 {
1056 zval dummy;
1057
1058 ZVAL_NULL(&dummy);
1059 return zend_hash_index_add(ht, h, &dummy);
1060 }
1061
zend_hash_add_empty_element(HashTable * ht,zend_string * key)1062 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
1063 {
1064 zval dummy;
1065
1066 ZVAL_NULL(&dummy);
1067 return zend_hash_add(ht, key, &dummy);
1068 }
1069
zend_hash_str_add_empty_element(HashTable * ht,const char * str,size_t len)1070 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
1071 {
1072 zval dummy;
1073
1074 ZVAL_NULL(&dummy);
1075 return zend_hash_str_add(ht, str, len, &dummy);
1076 }
1077
/* Core implementation behind all integer-key insert/update/lookup entry
 * points. `flag` is a compile-time constant at each inlined call site, so
 * the flag checks fold away. Returns the slot for key `h`, or NULL when a
 * plain HASH_ADD finds the key already present. */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	zval *zv;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* nNextFreeElement of a fresh table is ZEND_LONG_MIN; the first
	 * appended element gets index 0 */
	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
		h = 0;
	}

	if (HT_IS_PACKED(ht)) {
		/* an append-new (HASH_ADD_NEW|HASH_ADD_NEXT) can never hit an
		 * occupied slot, so the in-range check is skipped for it */
		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
		 && h < ht->nNumUsed) {
			zv = ht->arPacked + h;
			if (Z_TYPE_P(zv) != IS_UNDEF) {
				if (flag & HASH_LOOKUP) {
					return zv;
				}
replace:
				if (flag & HASH_ADD) {
					return NULL;
				}
				if (ht->pDestructor) {
					ht->pDestructor(zv);
				}
				ZVAL_COPY_VALUE(zv, pData);
				return zv;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			zv = ht->arPacked + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				if (h > ht->nNumUsed) {
					/* fill the gap between the old end and h with holes */
					zval *q = ht->arPacked + ht->nNumUsed;
					while (q != zv) {
						ZVAL_UNDEF(q);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			ht->nNumOfElements++;
			if (flag & HASH_LOOKUP) {
				ZVAL_NULL(zv);
			} else {
				ZVAL_COPY_VALUE(zv, pData);
			}

			return zv;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* grow the packed array only while it would stay at least
			 * half full; otherwise fall through to hash conversion */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		if (h < ht->nTableSize) {
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		/* in debug builds the lookup also runs for HASH_ADD_NEW so the
		 * assertion below can validate the caller's "key is new" claim */
		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				if (flag & HASH_LOOKUP) {
					return &p->val;
				}
				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
				zv = &p->val;
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
	}

	/* append a new bucket and link it at the head of its collision chain */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	if ((zend_long)h >= ht->nNextFreeElement) {
		/* advance the auto-index, saturating at ZEND_LONG_MAX */
		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL;
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
1185
zend_hash_index_add_or_update(HashTable * ht,zend_ulong h,zval * pData,uint32_t flag)1186 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1187 {
1188 if (flag == HASH_ADD) {
1189 return zend_hash_index_add(ht, h, pData);
1190 } else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1191 return zend_hash_index_add_new(ht, h, pData);
1192 } else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1193 ZEND_ASSERT(h == ht->nNextFreeElement);
1194 return zend_hash_next_index_insert(ht, pData);
1195 } else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1196 ZEND_ASSERT(h == ht->nNextFreeElement);
1197 return zend_hash_next_index_insert_new(ht, pData);
1198 } else {
1199 ZEND_ASSERT(flag == HASH_UPDATE);
1200 return zend_hash_index_update(ht, h, pData);
1201 }
1202 }
1203
/* Insert `pData` at integer key `h`; returns NULL if `h` already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
1208
/* Insert `pData` at integer key `h` when the caller guarantees `h` is not
 * present, allowing the duplicate lookup to be skipped in release builds. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
1213
/* Insert or overwrite the value at integer key `h`. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
1218
/* Append `pData` at the table's next free integer index (nNextFreeElement). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
1223
/* Append `pData` at the next free integer index; the HASH_ADD_NEW variant
 * lets the implementation skip the existing-key check. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
1228
/* Return the slot for integer key `h`, creating it with a NULL value when missing. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
{
	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
}
1233
/* Replace the string key of bucket `b` in-place with `key`, keeping the
 * bucket at the same position in arData. Returns NULL when a different
 * bucket already uses `key`; otherwise returns &b->val. */
ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx, i;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!HT_IS_PACKED(ht));

	(void)zend_string_hash_val(key);
	p = zend_hash_find_bucket(ht, key);
	if (UNEXPECTED(p)) {
		/* key already taken - only a no-op rename of b itself succeeds */
		return (p == b) ? &p->val : NULL;
	}

	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}

	arData = ht->arData;

	/* del from hash */
	idx = HT_IDX_TO_HASH(b - arData);
	nIndex = b->h | ht->nTableMask;
	i = HT_HASH_EX(arData, nIndex);
	if (i == idx) {
		/* b is the head of its collision chain */
		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
	} else {
		/* walk the chain to b's predecessor, then unlink b */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(p->val) = Z_NEXT(b->val);
	}
	zend_string_release(b->key);

	/* add to hash */
	idx = b - arData;
	b->key = key;
	b->h = ZSTR_H(key);
	nIndex = b->h | ht->nTableMask;
	idx = HT_IDX_TO_HASH(idx);
	i = HT_HASH_EX(arData, nIndex);
	if (i == HT_INVALID_IDX || i < idx) {
		/* empty chain, or b belongs in front: make b the new head */
		Z_NEXT(b->val) = i;
		HT_HASH_EX(arData, nIndex) = idx;
	} else {
		/* insert b so the chain stays sorted by descending bucket index */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(b->val) = Z_NEXT(p->val);
		Z_NEXT(p->val) = idx;
	}
	return &b->val;
}
1294
/* Called when arData is full: either compact in place (when enough deleted
 * holes can be reclaimed) or double the allocation and rehash. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableSize = nSize;
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* only the bucket array is copied; the hash slots are rebuilt by
		 * the rehash below */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
1322
/* Rebuild the collision chains of a non-packed table from its bucket
 * array, compacting away IS_UNDEF holes in the process and remapping the
 * internal pointer and any active external iterators. */
ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
			/* Even if the array is empty, we still need to reset the iterator positions. */
			ht->nInternalPointer = 0;
			if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
				HashTableIterator *iter = EG(ht_iterators);
				HashTableIterator *end = iter + EG(ht_iterators_used);
				while (iter != end) {
					if (iter->ht == ht) {
						iter->pos = 0;
					}
					iter++;
				}
			}
		}
		return;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* fast path: every bucket is live, just relink the chains */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* first hole found at i: from here on, shift live buckets
				 * down to position j (write cursor q) while relinking */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* same compaction, but additionally remap every active
					 * iterator position from the old index to the new one */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
}
1426
zend_hash_iterators_clamp_max(HashTable * ht,uint32_t max)1427 static zend_always_inline void zend_hash_iterators_clamp_max(HashTable *ht, uint32_t max)
1428 {
1429 if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1430 HashTableIterator *iter = EG(ht_iterators);
1431 HashTableIterator *end = iter + EG(ht_iterators_used);
1432 while (iter != end) {
1433 if (iter->ht == ht) {
1434 iter->pos = MIN(iter->pos, max);
1435 }
1436 iter++;
1437 }
1438 }
1439 }
1440
/* Remove the value `zv` at packed position `idx` (given in HT_IDX_TO_HASH
 * form). Trailing holes are trimmed from nNumUsed, and the slot is marked
 * IS_UNDEF before the destructor runs so reentrant code cannot observe the
 * dying value. */
static zend_always_inline void _zend_hash_packed_del_val(HashTable *ht, uint32_t idx, zval *zv)
{
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nNumUsed - 1 == idx) {
		/* deleting the last used slot: trim it and any preceding holes */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arPacked[ht->nNumUsed-1]) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
		zend_hash_iterators_clamp_max(ht, ht->nNumUsed);
	}
	if (ht->pDestructor) {
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, zv);
		ZVAL_UNDEF(zv);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(zv);
	}
}
1461
/* Unlink bucket `p` (hash-form index `idx`) from its collision chain -
 * `prev` is its chain predecessor, or NULL when p is the chain head - and
 * destroy its value. The caller has already dealt with p->key. */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	if (prev) {
		Z_NEXT(prev->val) = Z_NEXT(p->val);
	} else {
		HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nNumUsed - 1 == idx) {
		/* deleting the last used bucket: trim it and any preceding holes */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
		zend_hash_iterators_clamp_max(ht, ht->nNumUsed);
	}
	/* mark the slot IS_UNDEF before running the destructor so reentrant
	 * code sees a consistent table */
	if (ht->pDestructor) {
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
1487
/* Delete bucket `p` (hash-form index `idx`): locate its chain predecessor,
 * release its string key (if any), then unlink and destroy it. */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;
	uint32_t nIndex;
	uint32_t i;

	nIndex = p->h | ht->nTableMask;
	i = HT_HASH(ht, nIndex);

	if (i != idx) {
		/* p is not the chain head: walk forward to its predecessor */
		prev = HT_HASH_TO_BUCKET(ht, i);
		while (Z_NEXT(prev->val) != idx) {
			i = Z_NEXT(prev->val);
			prev = HT_HASH_TO_BUCKET(ht, i);
		}
	}

	if (p->key) {
		zend_string_release(p->key);
		p->key = NULL;
	}
	_zend_hash_del_el_ex(ht, idx, p, prev);
}
1511
/* Public entry: delete packed value `zv`, which must point into arPacked. */
ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(HT_IS_PACKED(ht));
	_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(zv - ht->arPacked), zv);
}
1519
1520
/* Public entry: delete bucket `p`, which must point into arData of a
 * non-packed table. */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!HT_IS_PACKED(ht));
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
1528
zend_hash_del(HashTable * ht,zend_string * key)1529 ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1530 {
1531 zend_ulong h;
1532 uint32_t nIndex;
1533 uint32_t idx;
1534 Bucket *p;
1535 Bucket *prev = NULL;
1536
1537 IS_CONSISTENT(ht);
1538 HT_ASSERT_RC1(ht);
1539
1540 h = zend_string_hash_val(key);
1541 nIndex = h | ht->nTableMask;
1542
1543 idx = HT_HASH(ht, nIndex);
1544 while (idx != HT_INVALID_IDX) {
1545 p = HT_HASH_TO_BUCKET(ht, idx);
1546 if ((p->key == key) ||
1547 (p->h == h &&
1548 p->key &&
1549 zend_string_equal_content(p->key, key))) {
1550 zend_string_release(p->key);
1551 p->key = NULL;
1552 _zend_hash_del_el_ex(ht, idx, p, prev);
1553 return SUCCESS;
1554 }
1555 prev = p;
1556 idx = Z_NEXT(p->val);
1557 }
1558 return FAILURE;
1559 }
1560
/* Delete `key`, treating an IS_INDIRECT value specially: only the indirect
 * target is destroyed and set to IS_UNDEF while the bucket itself remains
 * (symbol-table / property-table semantics).
 * Returns SUCCESS if something was deleted, FAILURE otherwise. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* match by pointer identity first, then by hash + content */
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					/* indirect target already deleted */
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* undef the slot before running the destructor,
						 * guarding against reentrant access */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				zend_string_release(p->key);
				p->key = NULL;
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1610
zend_hash_str_del_ind(HashTable * ht,const char * str,size_t len)1611 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1612 {
1613 zend_ulong h;
1614 uint32_t nIndex;
1615 uint32_t idx;
1616 Bucket *p;
1617 Bucket *prev = NULL;
1618
1619 IS_CONSISTENT(ht);
1620 HT_ASSERT_RC1(ht);
1621
1622 h = zend_inline_hash_func(str, len);
1623 nIndex = h | ht->nTableMask;
1624
1625 idx = HT_HASH(ht, nIndex);
1626 while (idx != HT_INVALID_IDX) {
1627 p = HT_HASH_TO_BUCKET(ht, idx);
1628 if ((p->h == h)
1629 && p->key
1630 && zend_string_equals_cstr(p->key, str, len)) {
1631 if (Z_TYPE(p->val) == IS_INDIRECT) {
1632 zval *data = Z_INDIRECT(p->val);
1633
1634 if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1635 return FAILURE;
1636 } else {
1637 if (ht->pDestructor) {
1638 ht->pDestructor(data);
1639 }
1640 ZVAL_UNDEF(data);
1641 HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1642 }
1643 } else {
1644 zend_string_release(p->key);
1645 p->key = NULL;
1646 _zend_hash_del_el_ex(ht, idx, p, prev);
1647 }
1648 return SUCCESS;
1649 }
1650 prev = p;
1651 idx = Z_NEXT(p->val);
1652 }
1653 return FAILURE;
1654 }
1655
zend_hash_str_del(HashTable * ht,const char * str,size_t len)1656 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1657 {
1658 zend_ulong h;
1659 uint32_t nIndex;
1660 uint32_t idx;
1661 Bucket *p;
1662 Bucket *prev = NULL;
1663
1664 IS_CONSISTENT(ht);
1665 HT_ASSERT_RC1(ht);
1666
1667 h = zend_inline_hash_func(str, len);
1668 nIndex = h | ht->nTableMask;
1669
1670 idx = HT_HASH(ht, nIndex);
1671 while (idx != HT_INVALID_IDX) {
1672 p = HT_HASH_TO_BUCKET(ht, idx);
1673 if ((p->h == h)
1674 && p->key
1675 && zend_string_equals_cstr(p->key, str, len)) {
1676 zend_string_release(p->key);
1677 p->key = NULL;
1678 _zend_hash_del_el_ex(ht, idx, p, prev);
1679 return SUCCESS;
1680 }
1681 prev = p;
1682 idx = Z_NEXT(p->val);
1683 }
1684 return FAILURE;
1685 }
1686
/* Remove the entry at integer key `h`. Returns SUCCESS/FAILURE. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		/* packed table: the key is the array offset itself */
		if (h < ht->nNumUsed) {
			zval *zv = ht->arPacked + h;
			if (Z_TYPE_P(zv) != IS_UNDEF) {
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(h), zv);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	/* walk the collision chain; integer-key buckets have key == NULL */
	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1721
/* Destroy the table's contents: run the destructor over live values,
 * release string keys, detach iterators and free the data block. The
 * HashTable struct itself is NOT freed here. */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			if (ht->pDestructor) {
				zval *zv = ht->arPacked;
				zval *end = zv + ht->nNumUsed;

				SET_INCONSISTENT(HT_IS_DESTROYING);
				/* separate loops depending on whether IS_UNDEF holes must
				 * be skipped */
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(zv);
					} while (++zv != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
							ht->pDestructor(zv);
						}
					} while (++zv != end);
				}
				SET_INCONSISTENT(HT_DESTROYED);
			}
			zend_hash_iterators_remove(ht);
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (ht->pDestructor) {
				SET_INCONSISTENT(HT_IS_DESTROYING);

				/* four specialised loops: with/without string keys to
				 * release, with/without IS_UNDEF holes to skip */
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(&p->val);
						} while (++p != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
								ht->pDestructor(&p->val);
							}
						} while (++p != end);
					}
				} else if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}

				SET_INCONSISTENT(HT_DESTROYED);
			} else {
				/* no destructor: only string keys need releasing */
				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				}
			}
			zend_hash_iterators_remove(ht);
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* uninitialized table: no heap data block to free */
		return;
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
1802
/* Destroy a refcounted array and free the HashTable struct itself. The
 * fast paths assume the standard ZVAL_PTR_DTOR destructor; anything else
 * falls back to zend_hash_destroy(). */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_IS_PACKED(ht)) {
			zval *zv = ht->arPacked;
			zval *end = zv + ht->nNumUsed;

			do {
				i_zval_ptr_dtor(zv);
			} while (++zv != end);
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			/* specialised loops: with/without string keys, with/without
			 * IS_UNDEF holes */
			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				do {
					i_zval_ptr_dtor(&p->val);
				} while (++p != end);
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release_ex(p->key, 0);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						i_zval_ptr_dtor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release_ex(p->key, 0);
						}
					}
				} while (++p != end);
			}
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* nothing was allocated yet: skip the efree of the data block */
		goto free_ht;
	}
	SET_INCONSISTENT(HT_DESTROYED);
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	zend_hash_iterators_remove(ht);
	FREE_HASHTABLE(ht);
}
1863
/* Remove all elements, destructing values and releasing keys, but keep the
 * allocated arrays so the table can be refilled without reallocation. */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			zval *zv = ht->arPacked;
			zval *end = zv + ht->nNumUsed;

			if (ht->pDestructor) {
				/* NOTE(review): packed tables are expected to always have
				 * only static keys, so this guard should be true — confirm */
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(zv);
						} while (++zv != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
								ht->pDestructor(zv);
							}
						} while (++zv != end);
					}
				}
			}
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (ht->pDestructor) {
				/* specialised loops: with/without string keys to release,
				 * with/without IS_UNDEF holes to skip */
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(&p->val);
						} while (++p != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
								ht->pDestructor(&p->val);
							}
						} while (++p != end);
					}
				} else if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			} else {
				/* no destructor: only string keys need releasing */
				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				}
			}
			/* only non-packed tables have hash slots to reset */
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
1940
/* Clean a symbol table: like zend_hash_clean() but specialised to the
 * inline zval destructor and to non-packed tables. */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		ZEND_ASSERT(!HT_IS_PACKED(ht));
		p = ht->arData;
		end = p + ht->nNumUsed;
		/* specialised loops: with/without string keys, with/without holes */
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
1980
/* Destroy the table by deleting every element one by one through the
 * regular deletion path (so per-element bookkeeping stays consistent even
 * if a destructor accesses the table), then free the data block. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked;

		for (idx = 0; idx < ht->nNumUsed; idx++, zv++) {
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData;

		for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2009
/* Like zend_hash_graceful_destroy(), but deletes the elements in reverse
 * array order (last used position first). */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			zv--;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			p--;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}

	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2044
/* This is used to iterate over the elements of a hashtable and selectively
 * delete certain entries. apply_func() receives the data of each entry and
 * decides whether the entry should be deleted or the iteration stopped.
 * The following return codes are possible:
 * ZEND_HASH_APPLY_KEEP   - keep the element and continue iterating
 * ZEND_HASH_APPLY_STOP   - stop the iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element; may be OR'ed with
 *                          ZEND_HASH_APPLY_STOP
 */
2053
/* Call apply_func() on every element, in insertion order. The callback's
 * return value is a bitmask: ZEND_HASH_APPLY_REMOVE deletes the element
 * (requires sole ownership of the table), ZEND_HASH_APPLY_STOP aborts the
 * iteration; both bits may be combined. */
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; /* skip holes */
			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deletion mutates the table; only legal on an unshared table. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; /* skip holes */
			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2092
2093
/* Like zend_hash_apply(), but forwards an extra opaque argument to the
 * callback. Same REMOVE/STOP bitmask semantics for the return value. */
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; /* skip holes */
			result = apply_func(zv, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deletion requires sole ownership of the table. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; /* skip holes */
			result = apply_func(&p->val, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2130
2131
/* Like zend_hash_apply(), but forwards a variable argument list plus the
 * element's key to the callback. The va_list is re-initialized with
 * va_start() for every element so each callback invocation sees the
 * arguments from the beginning; each va_start is matched by a va_end
 * (including on the early-exit STOP path). */
ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	uint32_t idx;
	va_list args;
	zend_hash_key hash_key;
	int result;

	IS_CONSISTENT(ht);

	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; /* skip holes */
			va_start(args, num_args);
			/* Packed tables have implicit integer keys equal to the index. */
			hash_key.h = idx;
			hash_key.key = NULL;

			result = apply_func(zv, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deletion requires sole ownership of the table. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; /* skip holes */
			va_start(args, num_args);
			hash_key.h = p->h;
			hash_key.key = p->key; /* NULL for integer keys */

			result = apply_func(&p->val, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	}
}
2185
2186
/* Like zend_hash_apply(), but visits elements in reverse insertion order.
 * Same REMOVE/STOP bitmask semantics for the callback's return value. */
ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv;

		while (idx > 0) {
			idx--;
			zv = ht->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; /* skip holes */

			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				/* Deletion requires sole ownership of the table. */
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		Bucket *p;

		while (idx > 0) {
			idx--;
			p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; /* skip holes */

			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2233
2234
/* Copy all elements of SOURCE into TARGET, overwriting entries with the same
 * key. pCopyConstructor (may be NULL) is invoked on each newly stored zval,
 * e.g. to add a reference. IS_INDIRECT slots are dereferenced before copying
 * and skipped when they point to an UNDEF value. */
ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
{
	uint32_t idx;
	zval *new_entry, *data;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target); /* target is mutated; must be unshared */

	if (HT_IS_PACKED(source)) {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			zval *zv = source->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue; /* skip holes */

			/* Packed keys are just the index. */
			new_entry = zend_hash_index_update(target, idx, zv);
			if (pCopyConstructor) {
				pCopyConstructor(new_entry);
			}
		}
		return;
	}

	for (idx = 0; idx < source->nNumUsed; idx++) {
		Bucket *p = source->arData + idx;

		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; /* skip holes */

		/* INDIRECT element may point to UNDEF-ined slots */
		data = &p->val;
		if (Z_TYPE_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
				continue;
			}
		}
		if (p->key) {
			new_entry = zend_hash_update(target, p->key, data);
		} else {
			new_entry = zend_hash_index_update(target, p->h, data);
		}
		if (pCopyConstructor) {
			pCopyConstructor(new_entry);
		}
	}
}
2280
2281
/* Duplicate a single zval from SOURCE into DEST for array duplication.
 * Returns 0 when the slot should be skipped (UNDEF, possibly behind an
 * IS_INDIRECT), 1 when DEST was written. The `packed`/`with_holes` flags are
 * compile-time constants at each inlined call site, so the dead branches
 * fold away. A reference with refcount 1 is unwrapped in place (copied by
 * value) unless it points back at the source array itself, which would
 * break self-referencing structures. */
static zend_always_inline bool zend_array_dup_value(HashTable *source, HashTable *target, zval *data, zval *dest, bool packed, bool with_holes)
{
	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			/* Unwrap a singly-referenced reference, except a self-reference
			 * to the array being duplicated. */
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break; /* unwrapped to a non-refcounted scalar */
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(dest, data);

	return 1;
}
2318
/* Duplicate one bucket P of SOURCE into bucket Q of TARGET at index IDX.
 * Returns 0 when the element is skipped (UNDEF value). For non-packed
 * tables the key is copied (addref'd unless all keys are known static/
 * interned) and Q is linked into TARGET's collision chain. */
static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
{
	if (!zend_array_dup_value(source, target, &p->val, &q->val, packed, with_holes)) {
		return 0;
	}

	if (!packed) {
		uint32_t nIndex;

		q->h = p->h;
		q->key = p->key;
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}

		/* Prepend Q to the collision chain for its hash slot. */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
2340
2341 // We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back
// We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back
/* For every active iterator on SOURCE, register a copy on TARGET at the same
 * position and thread it onto SOURCE's iterator's next_copy chain. Note:
 * zend_hash_iterator_add() may reallocate EG(ht_iterators), so the copy is
 * re-addressed via its index rather than a stale pointer. */
static void zend_array_dup_ht_iterators(HashTable *source, HashTable *target) {
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end = iter + EG(ht_iterators_used);

	while (iter != end) {
		if (iter->ht == source) {
			uint32_t copy_idx = zend_hash_iterator_add(target, iter->pos);
			HashTableIterator *copy_iter = EG(ht_iterators) + copy_idx;
			copy_iter->next_copy = iter->next_copy;
			iter->next_copy = copy_idx;
		}
		iter++;
	}
}
2356
/* Duplicate the packed zval array of SOURCE into TARGET. Holes are preserved
 * as UNDEF slots (when with_holes) so indices stay stable. Caller guarantees
 * nNumUsed > 0 (the do/while iterates at least once). */
static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
{
	zval *p = source->arPacked;
	zval *q = target->arPacked;
	zval *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_value(source, target, p, q, 1, with_holes)) {
			if (with_holes) {
				ZVAL_UNDEF(q); /* keep the hole at the same index */
			}
		}
		p++; q++;
	} while (p != end);

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}
}
2376
/* Duplicate the buckets of a non-packed SOURCE into TARGET and return the
 * number of buckets written (TARGET's new nNumUsed). The fast path in the
 * main do/while assumes a dense copy (source idx == target idx); on the
 * first skipped element it drops into one of two compacting loops that track
 * separate source (idx) and target (target_idx) cursors, fixing up the
 * internal pointer and — in the slower variant — any active iterator
 * positions via zend_hash_iterators_update(). */
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			/* First hole encountered: continue with compaction. */
			uint32_t target_idx = idx;

			idx++; p++;
			if (EXPECTED(!HT_HAS_ITERATORS(target))) {
				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							/* Remap internal pointer to the compacted index. */
							target->nInternalPointer = target_idx;
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			} else {
				/* Iterator positions must be remapped as indices shift. */
				target->nNumUsed = source->nNumUsed;
				uint32_t iter_pos = zend_hash_iterators_lower_pos(target, idx);

				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							target->nInternalPointer = target_idx;
						}
						if (UNEXPECTED(idx >= iter_pos)) {
							do {
								zend_hash_iterators_update(target, iter_pos, target_idx);
								iter_pos = zend_hash_iterators_lower_pos(target, iter_pos + 1);
							} while (iter_pos < idx);
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx; /* dense copy: every source bucket was kept */
}
2429
/* Create a fresh, refcount-1 duplicate of SOURCE. Four specializations:
 *  1. empty source        -> uninitialized target sharing no storage;
 *  2. immutable source    -> raw memcpy of the bucket area (values in an
 *                            immutable array need no per-element handling);
 *  3. packed source       -> packed copy, preserving or skipping holes;
 *  4. regular hash source -> bucket-by-bucket copy with compaction, hash
 *                            chains rebuilt from scratch.
 * The static_keys/with_holes flags select pre-inlined variants of
 * zend_array_dup_elements() so hole/key checks fold away in the fast paths. */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = GC_ARRAY;

	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* Case 1: empty — no bucket storage is allocated at all. */
		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		target->nTableSize = HT_MIN_SIZE;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* Case 2: immutable — a flat memcpy of the used region suffices. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		if (HT_IS_PACKED(source)) {
			HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_PACKED_USED_SIZE(source));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
		}
	} else if (HT_IS_PACKED(source)) {
		/* Case 3: packed — copy values, keep indices stable. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		/* Reset internal pointer if it points past the used region. */
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		/* Case 4: regular hash — per-bucket copy, chains rebuilt. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		/* Copy was compacted: used slots == element count. */
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
2517
/* Build a new packed array ("list") containing SOURCE's values in iteration
 * order, discarding keys. Singly-referenced references are unwrapped before
 * copying; each stored value gets its refcount bumped. */
ZEND_API HashTable* zend_array_to_list(HashTable *source)
{
	HashTable *result = _zend_new_array(zend_hash_num_elements(source));
	zend_hash_real_init_packed(result);

	ZEND_HASH_FILL_PACKED(result) {
		zval *entry;

		ZEND_HASH_FOREACH_VAL(source, entry) {
			/* Unwrap references that have no other holders. */
			if (UNEXPECTED(Z_ISREF_P(entry) && Z_REFCOUNT_P(entry) == 1)) {
				entry = Z_REFVAL_P(entry);
			}
			Z_TRY_ADDREF_P(entry);
			ZEND_HASH_FILL_ADD(entry);
		} ZEND_HASH_FOREACH_END();
	} ZEND_HASH_FILL_END();

	return result;
}
2537
2538
/* Merge SOURCE into TARGET. With `overwrite`, existing keys in TARGET are
 * replaced (update semantics); without it, existing keys are kept (add
 * semantics) and pCopyConstructor only runs for entries actually inserted.
 * IS_INDIRECT slots in SOURCE are dereferenced first; string-keyed entries
 * use HASH_UPDATE_INDIRECT so indirect slots in TARGET are written through. */
ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
{
	uint32_t idx;
	Bucket *p;
	zval *t, *s;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target); /* target is mutated; must be unshared */

	if (overwrite) {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue; /* skip holes */
				}
				t = zend_hash_index_update(target, idx, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue; /* skip holes (possibly behind INDIRECT) */
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_update(target, p->h, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	} else {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue; /* skip holes */
				}
				/* NULL result means the key already existed — keep it. */
				t = zend_hash_index_add(target, idx, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue; /* skip holes (possibly behind INDIRECT) */
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_add(target, p->h, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	}
}
2623
2624
zend_hash_replace_checker_wrapper(HashTable * target,zval * source_data,zend_ulong h,zend_string * key,void * pParam,merge_checker_func_t merge_checker_func)2625 static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, zend_ulong h, zend_string *key, void *pParam, merge_checker_func_t merge_checker_func)
2626 {
2627 zend_hash_key hash_key;
2628
2629 hash_key.h = h;
2630 hash_key.key = key;
2631 return merge_checker_func(target, source_data, &hash_key, pParam);
2632 }
2633
2634
/* Merge SOURCE into TARGET, consulting pMergeSource for each element to
 * decide whether it should be copied (update semantics on acceptance).
 * Only non-packed (string/mixed-keyed) sources are supported. */
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
	uint32_t idx;
	Bucket *p;
	zval *t;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target); /* target is mutated; must be unshared */

	ZEND_ASSERT(!HT_IS_PACKED(source));
	for (idx = 0; idx < source->nNumUsed; idx++) {
		p = source->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue; /* skip holes */
		if (zend_hash_replace_checker_wrapper(target, &p->val, p->h, p->key, pParam, pMergeSource)) {
			t = zend_hash_update(target, p->key, &p->val);
			if (pCopyConstructor) {
				pCopyConstructor(t);
			}
		}
	}
}
2657
2658
2659 /* Returns the hash table data if found and NULL if not. */
zend_hash_find(const HashTable * ht,zend_string * key)2660 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2661 {
2662 Bucket *p;
2663
2664 IS_CONSISTENT(ht);
2665
2666 (void)zend_string_hash_val(key);
2667 p = zend_hash_find_bucket(ht, key);
2668 return p ? &p->val : NULL;
2669 }
2670
zend_hash_find_known_hash(const HashTable * ht,const zend_string * key)2671 ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, const zend_string *key)
2672 {
2673 Bucket *p;
2674
2675 IS_CONSISTENT(ht);
2676
2677 p = zend_hash_find_bucket(ht, key);
2678 return p ? &p->val : NULL;
2679 }
2680
zend_hash_str_find(const HashTable * ht,const char * str,size_t len)2681 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2682 {
2683 zend_ulong h;
2684 Bucket *p;
2685
2686 IS_CONSISTENT(ht);
2687
2688 h = zend_inline_hash_func(str, len);
2689 p = zend_hash_str_find_bucket(ht, str, len, h);
2690 return p ? &p->val : NULL;
2691 }
2692
zend_hash_index_find(const HashTable * ht,zend_ulong h)2693 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2694 {
2695 Bucket *p;
2696
2697 IS_CONSISTENT(ht);
2698
2699 if (HT_IS_PACKED(ht)) {
2700 if (h < ht->nNumUsed) {
2701 zval *zv = ht->arPacked + h;
2702
2703 if (Z_TYPE_P(zv) != IS_UNDEF) {
2704 return zv;
2705 }
2706 }
2707 return NULL;
2708 }
2709
2710 p = zend_hash_index_find_bucket(ht, h);
2711 return p ? &p->val : NULL;
2712 }
2713
_zend_hash_index_find(const HashTable * ht,zend_ulong h)2714 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2715 {
2716 Bucket *p;
2717
2718 IS_CONSISTENT(ht);
2719 ZEND_ASSERT(!HT_IS_PACKED(ht));
2720
2721 p = zend_hash_index_find_bucket(ht, h);
2722 return p ? &p->val : NULL;
2723 }
2724
/* Set *pos to the first valid (non-UNDEF) position of HT, or to nNumUsed
 * when the table is empty. When pos is the table's own internal pointer the
 * table must be unshared (asserted). */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
	*pos = _zend_hash_get_valid_pos(ht, 0);
}
2731
2732
2733 /* This function will be extremely optimized by remembering
2734 * the end of the list
2735 */
zend_hash_internal_pointer_end_ex(HashTable * ht,HashPosition * pos)2736 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2737 {
2738 uint32_t idx;
2739
2740 IS_CONSISTENT(ht);
2741 HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2742
2743 idx = ht->nNumUsed;
2744 if (HT_IS_PACKED(ht)) {
2745 while (idx > 0) {
2746 idx--;
2747 if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2748 *pos = idx;
2749 return;
2750 }
2751 }
2752 } else {
2753 while (idx > 0) {
2754 idx--;
2755 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2756 *pos = idx;
2757 return;
2758 }
2759 }
2760 }
2761 *pos = ht->nNumUsed;
2762 }
2763
2764
/* Advance *pos to the next valid element. Returns SUCCESS even when the
 * cursor runs off the end (pos is set to nNumUsed, the end sentinel);
 * FAILURE only when pos was already at/past the end on entry. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);

	idx = _zend_hash_get_valid_pos(ht, *pos);
	if (idx < ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			while (1) {
				idx++;
				if (idx >= ht->nNumUsed) {
					*pos = ht->nNumUsed; /* moved past the last element */
					return SUCCESS;
				}
				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		} else {
			while (1) {
				idx++;
				if (idx >= ht->nNumUsed) {
					*pos = ht->nNumUsed; /* moved past the last element */
					return SUCCESS;
				}
				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		}
	} else {
		return FAILURE; /* already at end */
	}
}
2803
/* Move *pos to the previous valid element. When no earlier element exists,
 * pos is set to nNumUsed (end sentinel) and SUCCESS is still returned;
 * FAILURE only when pos was already at/past the end on entry. Note: unlike
 * move_forward, the raw *pos is used without validation. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
{
	uint32_t idx = *pos;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);

	if (idx < ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			while (idx > 0) {
				idx--;
				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		} else {
			while (idx > 0) {
				idx--;
				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		}
		*pos = ht->nNumUsed; /* no earlier valid element */
		return SUCCESS;
	} else {
		return FAILURE; /* already at end */
	}
}
2835
2836
2837 /* This function should be made binary safe */
zend_hash_get_current_key_ex(const HashTable * ht,zend_string ** str_index,zend_ulong * num_index,const HashPosition * pos)2838 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, const HashPosition *pos)
2839 {
2840 uint32_t idx;
2841 Bucket *p;
2842
2843 IS_CONSISTENT(ht);
2844 idx = _zend_hash_get_valid_pos(ht, *pos);
2845 if (idx < ht->nNumUsed) {
2846 if (HT_IS_PACKED(ht)) {
2847 *num_index = idx;
2848 return HASH_KEY_IS_LONG;
2849 }
2850 p = ht->arData + idx;
2851 if (p->key) {
2852 *str_index = p->key;
2853 return HASH_KEY_IS_STRING;
2854 } else {
2855 *num_index = p->h;
2856 return HASH_KEY_IS_LONG;
2857 }
2858 }
2859 return HASH_KEY_NON_EXISTENT;
2860 }
2861
zend_hash_get_current_key_zval_ex(const HashTable * ht,zval * key,const HashPosition * pos)2862 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, const HashPosition *pos)
2863 {
2864 uint32_t idx;
2865 Bucket *p;
2866
2867 IS_CONSISTENT(ht);
2868 idx = _zend_hash_get_valid_pos(ht, *pos);
2869 if (idx >= ht->nNumUsed) {
2870 ZVAL_NULL(key);
2871 } else {
2872 if (HT_IS_PACKED(ht)) {
2873 ZVAL_LONG(key, idx);
2874 return;
2875 }
2876 p = ht->arData + idx;
2877 if (p->key) {
2878 ZVAL_STR_COPY(key, p->key);
2879 } else {
2880 ZVAL_LONG(key, p->h);
2881 }
2882 }
2883 }
2884
zend_hash_get_current_key_type_ex(HashTable * ht,HashPosition * pos)2885 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2886 {
2887 uint32_t idx;
2888 Bucket *p;
2889
2890 IS_CONSISTENT(ht);
2891 idx = _zend_hash_get_valid_pos(ht, *pos);
2892 if (idx < ht->nNumUsed) {
2893 if (HT_IS_PACKED(ht)) {
2894 return HASH_KEY_IS_LONG;
2895 }
2896 p = ht->arData + idx;
2897 if (p->key) {
2898 return HASH_KEY_IS_STRING;
2899 } else {
2900 return HASH_KEY_IS_LONG;
2901 }
2902 }
2903 return HASH_KEY_NON_EXISTENT;
2904 }
2905
2906
zend_hash_get_current_data_ex(HashTable * ht,HashPosition * pos)2907 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2908 {
2909 uint32_t idx;
2910 Bucket *p;
2911
2912 IS_CONSISTENT(ht);
2913 idx = _zend_hash_get_valid_pos(ht, *pos);
2914 if (idx < ht->nNumUsed) {
2915 if (HT_IS_PACKED(ht)) {
2916 return &ht->arPacked[idx];
2917 }
2918 p = ht->arData + idx;
2919 return &p->val;
2920 } else {
2921 return NULL;
2922 }
2923 }
2924
zend_hash_bucket_swap(Bucket * p,Bucket * q)2925 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2926 {
2927 zval val;
2928 zend_ulong h;
2929 zend_string *key;
2930
2931 val = p->val;
2932 h = p->h;
2933 key = p->key;
2934
2935 p->val = q->val;
2936 p->h = q->h;
2937 p->key = q->key;
2938
2939 q->val = val;
2940 q->h = h;
2941 q->key = key;
2942 }
2943
zend_hash_bucket_renum_swap(Bucket * p,Bucket * q)2944 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2945 {
2946 zval val;
2947
2948 val = p->val;
2949 p->val = q->val;
2950 q->val = val;
2951 }
2952
zend_hash_bucket_packed_swap(Bucket * p,Bucket * q)2953 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2954 {
2955 zval val;
2956 zend_ulong h;
2957
2958 val = p->val;
2959 h = p->h;
2960
2961 p->val = q->val;
2962 p->h = q->h;
2963
2964 q->val = val;
2965 q->h = h;
2966 }
2967
/* Sort HT in place with the given sort backend and comparator. When
 * `renumber` is true, keys are discarded and elements re-keyed 0..n-1.
 * Implementation notes: packed tables are first converted to hash form so
 * buckets exist to sort; the original order is stashed in Z_EXTRA of each
 * value to let comparators implement stable sorting; this clobbers Z_NEXT,
 * so the hash head slots are reset before sorting and rebuilt (or the table
 * converted to packed) afterwards. */
ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
{
	Bucket *p;
	uint32_t i, j;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht); /* sorting mutates the table; must be unshared */

	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
		/* Doesn't require sorting */
		return;
	}

	if (HT_IS_PACKED(ht)) {
		zend_hash_packed_to_hash(ht); // TODO: ???
	}

	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Store original order of elements in extra space to allow stable sorting. */
		for (i = 0; i < ht->nNumUsed; i++) {
			Z_EXTRA(ht->arData[i].val) = i;
		}
	} else {
		/* Remove holes and store original order. */
		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
			p = ht->arData + j;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			if (i != j) {
				ht->arData[i] = *p;
			}
			Z_EXTRA(ht->arData[i].val) = i;
			i++;
		}
		ht->nNumUsed = i; /* table is now hole-free */
	}

	if (!HT_IS_PACKED(ht)) {
		/* We broke the hash collisions chains overriding Z_NEXT() by Z_EXTRA().
		 * Reset the hash headers table as well to avoid possible inconsistent
		 * access on recursive data structures.
		 *
		 * See Zend/tests/bug63882_2.phpt
		 */
		HT_HASH_RESET(ht);
	}

	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
				(HT_IS_PACKED(ht) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));

	ht->nInternalPointer = 0;

	if (renumber) {
		/* Discard old keys and assign sequential integer keys. */
		for (j = 0; j < i; j++) {
			p = ht->arData + j;
			p->h = j;
			if (p->key) {
				zend_string_release(p->key);
				p->key = NULL;
			}
		}

		ht->nNextFreeElement = i;
	}
	if (HT_IS_PACKED(ht)) {
		if (!renumber) {
			/* Sorted order no longer matches keys: must use hash form. */
			zend_hash_packed_to_hash(ht);
		}
	} else {
		if (renumber) {
			/* Keys are now 0..n-1: shrink storage to packed form. */
			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
			Bucket *old_buckets = ht->arData;
			zval *zv;

			new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
			ht->nTableMask = HT_MIN_MASK;
			HT_SET_DATA_ADDR(ht, new_data);
			p = old_buckets;
			zv = ht->arPacked;
			for (i = 0; i < ht->nTableSize; i++) {
				ZVAL_COPY_VALUE(zv, &p->val);
				zv++;
				p++;
			}
			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
			HT_HASH_RESET_PACKED(ht);
		} else {
			/* Re-link the collision chains we broke above. */
			zend_hash_rehash(ht);
		}
	}
}
3060
zend_hash_compare_impl(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3061 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) {
3062 uint32_t idx1, idx2;
3063 zend_string *key1, *key2;
3064 zend_ulong h1, h2;
3065 zval *pData1, *pData2;;
3066 int result;
3067
3068 if (ht1->nNumOfElements != ht2->nNumOfElements) {
3069 return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
3070 }
3071
3072 for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
3073 if (HT_IS_PACKED(ht1)) {
3074 pData1 = ht1->arPacked + idx1;
3075 h1 = idx1;
3076 key1 = NULL;
3077 } else {
3078 Bucket *p = ht1->arData + idx1;
3079 pData1 = &p->val;
3080 h1 = p->h;
3081 key1 = p->key;
3082 }
3083
3084 if (Z_TYPE_P(pData1) == IS_UNDEF) continue;
3085 if (ordered) {
3086 if (HT_IS_PACKED(ht2)) {
3087 while (1) {
3088 ZEND_ASSERT(idx2 != ht2->nNumUsed);
3089 pData2 = ht2->arPacked + idx2;
3090 h2 = idx2;
3091 key2 = NULL;
3092 if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3093 idx2++;
3094 }
3095 } else {
3096 while (1) {
3097 Bucket *p;
3098 ZEND_ASSERT(idx2 != ht2->nNumUsed);
3099 p = ht2->arData + idx2;
3100 pData2 = &p->val;
3101 h2 = p->h;
3102 key2 = p->key;
3103 if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3104 idx2++;
3105 }
3106 }
3107 if (key1 == NULL && key2 == NULL) { /* numeric indices */
3108 if (h1 != h2) {
3109 return h1 > h2 ? 1 : -1;
3110 }
3111 } else if (key1 != NULL && key2 != NULL) { /* string indices */
3112 if (ZSTR_LEN(key1) != ZSTR_LEN(key2)) {
3113 return ZSTR_LEN(key1) > ZSTR_LEN(key2) ? 1 : -1;
3114 }
3115
3116 result = memcmp(ZSTR_VAL(key1), ZSTR_VAL(key2), ZSTR_LEN(key1));
3117 if (result != 0) {
3118 return result;
3119 }
3120 } else {
3121 /* Mixed key types: A string key is considered as larger */
3122 return key1 != NULL ? 1 : -1;
3123 }
3124 idx2++;
3125 } else {
3126 if (key1 == NULL) { /* numeric index */
3127 pData2 = zend_hash_index_find(ht2, h1);
3128 if (pData2 == NULL) {
3129 return 1;
3130 }
3131 } else { /* string index */
3132 pData2 = zend_hash_find(ht2, key1);
3133 if (pData2 == NULL) {
3134 return 1;
3135 }
3136 }
3137 }
3138
3139 if (Z_TYPE_P(pData1) == IS_INDIRECT) {
3140 pData1 = Z_INDIRECT_P(pData1);
3141 }
3142 if (Z_TYPE_P(pData2) == IS_INDIRECT) {
3143 pData2 = Z_INDIRECT_P(pData2);
3144 }
3145
3146 if (Z_TYPE_P(pData1) == IS_UNDEF) {
3147 if (Z_TYPE_P(pData2) != IS_UNDEF) {
3148 return -1;
3149 }
3150 } else if (Z_TYPE_P(pData2) == IS_UNDEF) {
3151 return 1;
3152 } else {
3153 result = compar(pData1, pData2);
3154 if (result != 0) {
3155 return result;
3156 }
3157 }
3158 }
3159
3160 return 0;
3161 }
3162
zend_hash_compare(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3163 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
3164 {
3165 int result;
3166 IS_CONSISTENT(ht1);
3167 IS_CONSISTENT(ht2);
3168
3169 if (ht1 == ht2) {
3170 return 0;
3171 }
3172
3173 /* It's enough to protect only one of the arrays.
3174 * The second one may be referenced from the first and this may cause
3175 * false recursion detection.
3176 */
3177 if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
3178 zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
3179 }
3180
3181 GC_TRY_PROTECT_RECURSION(ht1);
3182 result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
3183 GC_TRY_UNPROTECT_RECURSION(ht1);
3184
3185 return result;
3186 }
3187
3188
zend_hash_minmax(const HashTable * ht,compare_func_t compar,uint32_t flag)3189 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
3190 {
3191 uint32_t idx;
3192 zval *res;
3193
3194 IS_CONSISTENT(ht);
3195
3196 if (ht->nNumOfElements == 0 ) {
3197 return NULL;
3198 }
3199
3200 if (HT_IS_PACKED(ht)) {
3201 zval *zv;
3202
3203 idx = 0;
3204 while (1) {
3205 if (idx == ht->nNumUsed) {
3206 return NULL;
3207 }
3208 if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) break;
3209 idx++;
3210 }
3211 res = ht->arPacked + idx;
3212 for (; idx < ht->nNumUsed; idx++) {
3213 zv = ht->arPacked + idx;
3214 if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
3215
3216 if (flag) {
3217 if (compar(res, zv) < 0) { /* max */
3218 res = zv;
3219 }
3220 } else {
3221 if (compar(res, zv) > 0) { /* min */
3222 res = zv;
3223 }
3224 }
3225 }
3226 } else {
3227 Bucket *p;
3228
3229 idx = 0;
3230 while (1) {
3231 if (idx == ht->nNumUsed) {
3232 return NULL;
3233 }
3234 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
3235 idx++;
3236 }
3237 res = &ht->arData[idx].val;
3238 for (; idx < ht->nNumUsed; idx++) {
3239 p = ht->arData + idx;
3240 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
3241
3242 if (flag) {
3243 if (compar(res, &p->val) < 0) { /* max */
3244 res = &p->val;
3245 }
3246 } else {
3247 if (compar(res, &p->val) > 0) { /* min */
3248 res = &p->val;
3249 }
3250 }
3251 }
3252 }
3253 return res;
3254 }
3255
/* Decide whether the string "key" of the given length spells a decimal
 * integer usable as a numeric hash index. On success store the value in
 * *idx and return 1; otherwise return 0.
 *
 * NOTE(review): appears to assume key is non-empty and NUL-terminated,
 * and that a lone "-" is filtered out by the caller before this runs —
 * confirm against the inline wrapper in zend_hash.h. */
ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
	const char *tmp = key;

	const char *end = key + length;

	/* Skip the sign; the original '-' is re-examined via *key below. */
	if (*tmp == '-') {
		tmp++;
	}

	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
	 || (SIZEOF_ZEND_LONG == 4 &&
	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
	     *tmp > '2')) { /* overflow */
		return 0;
	}
	*idx = (*tmp - '0');
	while (1) {
		++tmp;
		if (tmp == end) {
			if (*key == '-') {
				/* Negative range check: the largest representable
				 * magnitude is ZEND_LONG_MAX + 1 (== -ZEND_LONG_MIN).
				 * The unsigned *idx - 1 would also wrap for *idx == 0,
				 * but "-0" was already rejected above as a leading zero. */
				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
					return 0;
				}
				/* Negate in unsigned (two's-complement) arithmetic. */
				*idx = 0 - *idx;
			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
				return 0;
			}
			return 1;
		}
		if (*tmp <= '9' && *tmp >= '0') {
			/* Accumulate the next decimal digit. */
			*idx = (*idx * 10) + (*tmp - '0');
		} else {
			/* Non-digit character: not a numeric string. */
			return 0;
		}
	}
}
3294
3295 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
3296 * and converts it to a "proptable" (contains only string keys).
3297 * If the symtable didn't need duplicating, its refcount is incremented.
3298 */
zend_symtable_to_proptable(HashTable * ht)3299 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
3300 {
3301 zend_ulong num_key;
3302 zend_string *str_key;
3303 zval *zv;
3304
3305 if (UNEXPECTED(HT_IS_PACKED(ht))) {
3306 goto convert;
3307 }
3308
3309 ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3310 if (!str_key) {
3311 goto convert;
3312 }
3313 } ZEND_HASH_FOREACH_END();
3314
3315 if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
3316 GC_ADDREF(ht);
3317 }
3318
3319 return ht;
3320
3321 convert:
3322 {
3323 HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3324
3325 ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
3326 if (!str_key) {
3327 str_key = zend_long_to_str(num_key);
3328 zend_string_delref(str_key);
3329 }
3330 do {
3331 if (Z_OPT_REFCOUNTED_P(zv)) {
3332 if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3333 zv = Z_REFVAL_P(zv);
3334 if (!Z_OPT_REFCOUNTED_P(zv)) {
3335 break;
3336 }
3337 }
3338 Z_ADDREF_P(zv);
3339 }
3340 } while (0);
3341 zend_hash_update(new_ht, str_key, zv);
3342 } ZEND_HASH_FOREACH_END();
3343
3344 return new_ht;
3345 }
3346 }
3347
3348 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
3349 * a "symtable" (contains integer and non-numeric string keys).
3350 * If the proptable didn't need duplicating, its refcount is incremented.
3351 */
zend_proptable_to_symtable(HashTable * ht,bool always_duplicate)3352 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
3353 {
3354 zend_ulong num_key;
3355 zend_string *str_key;
3356 zval *zv;
3357
3358 if (!HT_IS_PACKED(ht)) {
3359 ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3360 /* The `str_key &&` here might seem redundant: property tables should
3361 * only have string keys. Unfortunately, this isn't true, at the very
3362 * least because of ArrayObject, which stores a symtable where the
3363 * property table should be.
3364 */
3365 if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3366 goto convert;
3367 }
3368 } ZEND_HASH_FOREACH_END();
3369 }
3370
3371 if (always_duplicate) {
3372 return zend_array_dup(ht);
3373 }
3374
3375 if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
3376 GC_ADDREF(ht);
3377 }
3378
3379 return ht;
3380
3381 convert:
3382 {
3383 HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3384
3385 ZEND_HASH_MAP_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
3386 do {
3387 if (Z_OPT_REFCOUNTED_P(zv)) {
3388 if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3389 zv = Z_REFVAL_P(zv);
3390 if (!Z_OPT_REFCOUNTED_P(zv)) {
3391 break;
3392 }
3393 }
3394 Z_ADDREF_P(zv);
3395 }
3396 } while (0);
3397 /* Again, thank ArrayObject for `!str_key ||`. */
3398 if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3399 zend_hash_index_update(new_ht, num_key, zv);
3400 } else {
3401 zend_hash_update(new_ht, str_key, zv);
3402 }
3403 } ZEND_HASH_FOREACH_END();
3404
3405 return new_ht;
3406 }
3407 }
3408