xref: /PHP-8.2/Zend/zend_hash.c (revision 2bdce613)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #if defined(__aarch64__) || defined(_M_ARM64)
26 # include <arm_neon.h>
27 #endif
28 
29 #ifdef __SSE2__
30 # include <mmintrin.h>
31 # include <emmintrin.h>
32 #endif
33 
34 #if ZEND_DEBUG
35 # define HT_ASSERT(ht, expr) \
36 	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
37 #else
38 # define HT_ASSERT(ht, expr)
39 #endif
40 
41 #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
42 
43 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
44 
45 #if ZEND_DEBUG
46 
47 #define HT_OK					0x00
48 #define HT_IS_DESTROYING		0x01
49 #define HT_DESTROYED			0x02
50 #define HT_CLEANING				0x03
51 
52 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
53 {
54 	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
55 		return;
56 	}
57 	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
58 		case HT_IS_DESTROYING:
59 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
60 			break;
61 		case HT_DESTROYED:
62 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
63 			break;
64 		case HT_CLEANING:
65 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
66 			break;
67 		default:
68 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
69 			break;
70 	}
71 	ZEND_UNREACHABLE();
72 }
73 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
74 #define SET_INCONSISTENT(n) do { \
75 		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
76 	} while (0)
77 #else
78 #define IS_CONSISTENT(a)
79 #define SET_INCONSISTENT(n)
80 #endif
81 
82 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
83 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
84 		zend_hash_do_resize(ht);					\
85 	}
86 
87 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
88 	void *result;
89 	char *lc_str;
90 
91 	/* Stack allocate small strings to improve performance */
92 	ALLOCA_FLAG(use_heap)
93 
94 	lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
95 	result = zend_hash_str_find_ptr(ht, lc_str, len);
96 	free_alloca(lc_str, use_heap);
97 
98 	return result;
99 }
100 
101 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
102 	void *result;
103 	zend_string *lc_key = zend_string_tolower(key);
104 	result = zend_hash_find_ptr(ht, lc_key);
105 	zend_string_release(lc_key);
106 	return result;
107 }
108 
109 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
110 
111 static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
112 {
113 #ifdef ZEND_WIN32
114 	unsigned long index;
115 #endif
116 
117 	/* Use a sufficiently large power of 2 */
118 	/* The size should be between HT_MIN_SIZE and HT_MAX_SIZE */
119 	if (nSize <= HT_MIN_SIZE) {
120 		return HT_MIN_SIZE;
121 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
122 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
123 	}
124 
125 #ifdef ZEND_WIN32
126 	if (BitScanReverse(&index, nSize - 1)) {
127 		return 0x2u << ((31 - index) ^ 0x1f);
128 	} else {
129 		/* nSize is ensured to be in the valid range, fall back to it
130 		   rather than using an undefined bit scan result. */
131 		return nSize;
132 	}
133 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
134 	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
135 #else
136 	nSize -= 1;
137 	nSize |= (nSize >> 1);
138 	nSize |= (nSize >> 2);
139 	nSize |= (nSize >> 4);
140 	nSize |= (nSize >> 8);
141 	nSize |= (nSize >> 16);
142 	return nSize + 1;
143 #endif
144 }
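/* Example: zend_hash_check_size(20) returns 32.  The portable fallback above
 * computes the next power of two by smearing the highest set bit of
 * (nSize - 1) into every lower bit and adding one: 19 = 0b10011 -> 0b11111
 * = 31 -> 32.  The BitScanReverse and __builtin_clz branches derive the same
 * result from the index of the highest set bit. */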
145 
146 static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
147 {
148 	void *data;
149 
150 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
151 		data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
152 	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
153 		/* Use specialized API with constant allocation amount for a particularly common case. */
154 		data = emalloc(HT_PACKED_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
155 	} else {
156 		data = emalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
157 	}
158 	HT_SET_DATA_ADDR(ht, data);
159 	/* Don't overwrite iterator count. */
160 	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
161 	HT_HASH_RESET_PACKED(ht);
162 }
163 
164 static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
165 {
166 	void *data;
167 	uint32_t nSize = ht->nTableSize;
168 
169 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
170 
171 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
172 		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
173 	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
174 		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
175 		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
176 		HT_SET_DATA_ADDR(ht, data);
177 		/* Don't overwrite iterator count. */
178 		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
179 #ifdef __SSE2__
180 		do {
181 			__m128i xmm0 = _mm_setzero_si128();
182 			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
183 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  0), xmm0);
184 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  4), xmm0);
185 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  8), xmm0);
186 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
187 		} while (0);
188 #elif defined(__aarch64__) || defined(_M_ARM64)
189 		do {
190 			int32x4_t t = vdupq_n_s32(-1);
191 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  0), t);
192 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  4), t);
193 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  8), t);
194 			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
195 		} while (0);
196 #else
197 		HT_HASH_EX(data,  0) = -1;
198 		HT_HASH_EX(data,  1) = -1;
199 		HT_HASH_EX(data,  2) = -1;
200 		HT_HASH_EX(data,  3) = -1;
201 		HT_HASH_EX(data,  4) = -1;
202 		HT_HASH_EX(data,  5) = -1;
203 		HT_HASH_EX(data,  6) = -1;
204 		HT_HASH_EX(data,  7) = -1;
205 		HT_HASH_EX(data,  8) = -1;
206 		HT_HASH_EX(data,  9) = -1;
207 		HT_HASH_EX(data, 10) = -1;
208 		HT_HASH_EX(data, 11) = -1;
209 		HT_HASH_EX(data, 12) = -1;
210 		HT_HASH_EX(data, 13) = -1;
211 		HT_HASH_EX(data, 14) = -1;
212 		HT_HASH_EX(data, 15) = -1;
213 #endif
214 		return;
215 	} else {
216 		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
217 	}
218 	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
219 	HT_SET_DATA_ADDR(ht, data);
220 	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
221 	HT_HASH_RESET(ht);
222 }
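/* Note: HT_INVALID_IDX is ((uint32_t) -1), so filling the hash slots with -1
 * marks every slot as empty.  For the minimum table size the scalar fallback
 * above writes all 16 uint32_t slots individually, while the SSE2 and NEON
 * branches do the same work with four 16-byte stores of an all-ones vector. */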
223 
224 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
225 {
226 	HT_ASSERT_RC1(ht);
227 	ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
228 	if (packed) {
229 		zend_hash_real_init_packed_ex(ht);
230 	} else {
231 		zend_hash_real_init_mixed_ex(ht);
232 	}
233 }
234 
235 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
236 	{HT_INVALID_IDX, HT_INVALID_IDX};
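/* An uninitialized table points its data address at this dummy block and uses
 * HT_MIN_MASK, so a lookup against a table that was never allocated simply
 * reads HT_INVALID_IDX from one of these two slots and reports "not found"
 * without a separate initialization check on the fast path. */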
237 
238 ZEND_API const HashTable zend_empty_array = {
239 	.gc.refcount = 2,
240 	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
241 	.u.flags = HASH_FLAG_UNINITIALIZED,
242 	.nTableMask = HT_MIN_MASK,
243 	{.arData = (Bucket*)&uninitialized_bucket[2]},
244 	.nNumUsed = 0,
245 	.nNumOfElements = 0,
246 	.nTableSize = HT_MIN_SIZE,
247 	.nInternalPointer = 0,
248 	.nNextFreeElement = 0,
249 	.pDestructor = ZVAL_PTR_DTOR
250 };
251 
252 static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
253 {
254 	GC_SET_REFCOUNT(ht, 1);
255 	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
256 	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
257 	ht->nTableMask = HT_MIN_MASK;
258 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
259 	ht->nNumUsed = 0;
260 	ht->nNumOfElements = 0;
261 	ht->nInternalPointer = 0;
262 	ht->nNextFreeElement = ZEND_LONG_MIN;
263 	ht->pDestructor = pDestructor;
264 	ht->nTableSize = zend_hash_check_size(nSize);
265 }
266 
267 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
268 {
269 	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
270 }
271 
272 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
273 {
274 	HashTable *ht = emalloc(sizeof(HashTable));
275 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
276 	return ht;
277 }
278 
279 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
280 {
281 	HashTable *ht = emalloc(sizeof(HashTable));
282 	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
283 	return ht;
284 }
285 
286 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
287 {
288 	zval *zv;
289 	HashTable *ht = emalloc(sizeof(HashTable));
290 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
291 	ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
292 	zend_hash_real_init_packed_ex(ht);
293 
294 	zv = ht->arPacked;
295 	ZVAL_COPY_VALUE(zv, val1);
296 	zv++;
297 	ZVAL_COPY_VALUE(zv, val2);
298 	return ht;
299 }
300 
301 ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
302 {
303 	HT_ASSERT_RC1(ht);
304 	if (ht->nTableSize >= HT_MAX_SIZE) {
305 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
306 	}
307 	uint32_t newTableSize = ht->nTableSize * 2;
308 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
309 	ht->nTableSize = newTableSize;
310 }
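/* Packed tables store zvals directly, indexed by the integer key, and keep
 * only the two dummy hash slots implied by HT_MIN_MASK, so growing one is a
 * plain reallocation that doubles nTableSize without any rehashing. */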
311 
312 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
313 {
314 	IS_CONSISTENT(ht);
315 
316 	HT_ASSERT_RC1(ht);
317 	zend_hash_real_init_ex(ht, packed);
318 }
319 
320 ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
321 {
322 	IS_CONSISTENT(ht);
323 
324 	HT_ASSERT_RC1(ht);
325 	zend_hash_real_init_packed_ex(ht);
326 }
327 
328 ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
329 {
330 	IS_CONSISTENT(ht);
331 
332 	HT_ASSERT_RC1(ht);
333 	zend_hash_real_init_mixed_ex(ht);
334 }
335 
336 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
337 {
338 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
339 	zval *src = ht->arPacked;
340 	Bucket *dst;
341 	uint32_t i;
342 	uint32_t nSize = ht->nTableSize;
343 
344 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
345 
346 	HT_ASSERT_RC1(ht);
347 	// Alloc before assign to avoid inconsistencies on OOM
348 	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
349 	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
350 	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
351 	HT_SET_DATA_ADDR(ht, new_data);
352 	dst = ht->arData;
353 	for (i = 0; i < ht->nNumUsed; i++) {
354 		ZVAL_COPY_VALUE(&dst->val, src);
355 		dst->h = i;
356 		dst->key = NULL;
357 		dst++;
358 		src++;
359 	}
360 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
361 	zend_hash_rehash(ht);
362 }
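/* Conversion from packed to hashed form turns every packed zval into a full
 * Bucket whose numeric key (h) is its former array position and whose string
 * key is NULL; zend_hash_rehash() then rebuilds the collision chains for the
 * newly allocated hash part. */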
363 
364 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
365 {
366 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
367 	Bucket *src = ht->arData;
368 	zval *dst;
369 	uint32_t i;
370 
371 	HT_ASSERT_RC1(ht);
372 	new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
373 	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
374 	ht->nTableMask = HT_MIN_MASK;
375 	HT_SET_DATA_ADDR(ht, new_data);
376 	HT_HASH_RESET_PACKED(ht);
377 	dst = ht->arPacked;
378 	for (i = 0; i < ht->nNumUsed; i++) {
379 		ZVAL_COPY_VALUE(dst, &src->val);
380 		dst++;
381 		src++;
382 	}
383 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
384 }
385 
386 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
387 {
388 	HT_ASSERT_RC1(ht);
389 
390 	if (nSize == 0) return;
391 
392 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
393 
394 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
395 		if (nSize > ht->nTableSize) {
396 			ht->nTableSize = zend_hash_check_size(nSize);
397 		}
398 		zend_hash_real_init(ht, packed);
399 	} else {
400 		if (packed) {
401 			ZEND_ASSERT(HT_IS_PACKED(ht));
402 			if (nSize > ht->nTableSize) {
403 				uint32_t newTableSize = zend_hash_check_size(nSize);
404 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
405 				ht->nTableSize = newTableSize;
406 			}
407 		} else {
408 			ZEND_ASSERT(!HT_IS_PACKED(ht));
409 			if (nSize > ht->nTableSize) {
410 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
411 				Bucket *old_buckets = ht->arData;
412 				nSize = zend_hash_check_size(nSize);
413 				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
414 				ht->nTableSize = nSize;
415 				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
416 				HT_SET_DATA_ADDR(ht, new_data);
417 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
418 				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
419 				zend_hash_rehash(ht);
420 			}
421 		}
422 	}
423 }
424 
425 ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
426 {
427 	Bucket *p, *end, *arData;
428 	uint32_t nIndex;
429 
430 	ZEND_ASSERT(!HT_IS_PACKED(ht));
431 	arData = ht->arData;
432 	p = arData + ht->nNumUsed;
433 	end = arData + nNumUsed;
434 	ht->nNumUsed = nNumUsed;
435 	while (p != end) {
436 		p--;
437 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
438 		ht->nNumOfElements--;
439 		/* Collision pointers always point from a higher to a lower bucket */
440 #if 0
441 		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
442 			abort();
443 		}
444 #endif
445 		nIndex = p->h | ht->nTableMask;
446 		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
447 	}
448 }
449 
450 static uint32_t zend_array_recalc_elements(HashTable *ht)
451 {
452 	zval *val;
453 	uint32_t num = ht->nNumOfElements;
454 
455 	ZEND_HASH_MAP_FOREACH_VAL(ht, val) {
456 		if (Z_TYPE_P(val) == IS_INDIRECT) {
457 			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
458 				num--;
459 			}
460 		}
461 	} ZEND_HASH_FOREACH_END();
462 	return num;
463 }
464 /* }}} */
465 
466 ZEND_API uint32_t zend_array_count(HashTable *ht)
467 {
468 	uint32_t num;
469 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
470 		num = zend_array_recalc_elements(ht);
471 		if (UNEXPECTED(ht->nNumOfElements == num)) {
472 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
473 		}
474 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
475 		num = zend_array_recalc_elements(ht);
476 	} else {
477 		num = zend_hash_num_elements(ht);
478 	}
479 	return num;
480 }
481 /* }}} */
482 
483 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
484 {
485 	if (HT_IS_PACKED(ht)) {
486 		while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arPacked[pos])) {
487 			pos++;
488 		}
489 	} else {
490 		while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
491 			pos++;
492 		}
493 	}
494 	return pos;
495 }
496 
497 static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
498 {
499 	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
500 }
501 
502 ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
503 {
504 	return _zend_hash_get_current_pos(ht);
505 }
506 
507 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
508 {
509 	HashTableIterator *iter = EG(ht_iterators);
510 	HashTableIterator *end  = iter + EG(ht_iterators_count);
511 	uint32_t idx;
512 
513 	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
514 		HT_INC_ITERATORS_COUNT(ht);
515 	}
516 	while (iter != end) {
517 		if (iter->ht == NULL) {
518 			iter->ht = ht;
519 			iter->pos = pos;
520 			idx = iter - EG(ht_iterators);
521 			if (idx + 1 > EG(ht_iterators_used)) {
522 				EG(ht_iterators_used) = idx + 1;
523 			}
524 			return idx;
525 		}
526 		iter++;
527 	}
528 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
529 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
530 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
531 	} else {
532 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
533 	}
534 	iter = EG(ht_iterators) + EG(ht_iterators_count);
535 	EG(ht_iterators_count) += 8;
536 	iter->ht = ht;
537 	iter->pos = pos;
538 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
539 	idx = iter - EG(ht_iterators);
540 	EG(ht_iterators_used) = idx + 1;
541 	return idx;
542 }
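/* The engine keeps a single global registry of in-use HashTable iterators in
 * EG(ht_iterators).  The loop above reuses the first free slot; when none is
 * free the registry grows by 8 slots, switching from the fixed inline buffer
 * EG(ht_iterators_slots) to heap storage on the first overflow. */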
543 
544 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
545 {
546 	HashTableIterator *iter = EG(ht_iterators) + idx;
547 
548 	ZEND_ASSERT(idx != (uint32_t)-1);
549 	if (UNEXPECTED(iter->ht != ht)) {
550 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
551 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
552 			HT_DEC_ITERATORS_COUNT(iter->ht);
553 		}
554 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
555 			HT_INC_ITERATORS_COUNT(ht);
556 		}
557 		iter->ht = ht;
558 		iter->pos = _zend_hash_get_current_pos(ht);
559 	}
560 	return iter->pos;
561 }
562 
563 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
564 {
565 	HashTable *ht = Z_ARRVAL_P(array);
566 	HashTableIterator *iter = EG(ht_iterators) + idx;
567 
568 	ZEND_ASSERT(idx != (uint32_t)-1);
569 	if (UNEXPECTED(iter->ht != ht)) {
570 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
571 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
572 			HT_DEC_ITERATORS_COUNT(iter->ht);
573 		}
574 		SEPARATE_ARRAY(array);
575 		ht = Z_ARRVAL_P(array);
576 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
577 			HT_INC_ITERATORS_COUNT(ht);
578 		}
579 		iter->ht = ht;
580 		iter->pos = _zend_hash_get_current_pos(ht);
581 	}
582 	return iter->pos;
583 }
584 
585 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
586 {
587 	HashTableIterator *iter = EG(ht_iterators) + idx;
588 
589 	ZEND_ASSERT(idx != (uint32_t)-1);
590 
591 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
592 			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
593 		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
594 		HT_DEC_ITERATORS_COUNT(iter->ht);
595 	}
596 	iter->ht = NULL;
597 
598 	if (idx == EG(ht_iterators_used) - 1) {
599 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
600 			idx--;
601 		}
602 		EG(ht_iterators_used) = idx;
603 	}
604 }
605 
606 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
607 {
608 	HashTableIterator *iter = EG(ht_iterators);
609 	HashTableIterator *end  = iter + EG(ht_iterators_used);
610 
611 	while (iter != end) {
612 		if (iter->ht == ht) {
613 			iter->ht = HT_POISONED_PTR;
614 		}
615 		iter++;
616 	}
617 }
618 
619 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
620 {
621 	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
622 		_zend_hash_iterators_remove(ht);
623 	}
624 }
625 
626 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
627 {
628 	HashTableIterator *iter = EG(ht_iterators);
629 	HashTableIterator *end  = iter + EG(ht_iterators_used);
630 	HashPosition res = ht->nNumUsed;
631 
632 	while (iter != end) {
633 		if (iter->ht == ht) {
634 			if (iter->pos >= start && iter->pos < res) {
635 				res = iter->pos;
636 			}
637 		}
638 		iter++;
639 	}
640 	return res;
641 }
642 
643 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
644 {
645 	HashTableIterator *iter = EG(ht_iterators);
646 	HashTableIterator *end  = iter + EG(ht_iterators_used);
647 
648 	while (iter != end) {
649 		if (iter->ht == ht && iter->pos == from) {
650 			iter->pos = to;
651 		}
652 		iter++;
653 	}
654 }
655 
656 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
657 {
658 	HashTableIterator *iter = EG(ht_iterators);
659 	HashTableIterator *end  = iter + EG(ht_iterators_used);
660 
661 	while (iter != end) {
662 		if (iter->ht == ht) {
663 			iter->pos += step;
664 		}
665 		iter++;
666 	}
667 }
668 
669 /* The hash value must be known and precomputed beforehand */
670 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, const zend_string *key)
671 {
672 	uint32_t nIndex;
673 	uint32_t idx;
674 	Bucket *p, *arData;
675 
676 	ZEND_ASSERT(ZSTR_H(key) != 0 && "Hash must be known");
677 
678 	arData = ht->arData;
679 	nIndex = ZSTR_H(key) | ht->nTableMask;
680 	idx = HT_HASH_EX(arData, nIndex);
681 
682 	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
683 		return NULL;
684 	}
685 	p = HT_HASH_TO_BUCKET_EX(arData, idx);
686 	if (EXPECTED(p->key == key)) { /* check for the same interned string */
687 		return p;
688 	}
689 
690 	while (1) {
691 		if (p->h == ZSTR_H(key) &&
692 		    EXPECTED(p->key) &&
693 		    zend_string_equal_content(p->key, key)) {
694 			return p;
695 		}
696 		idx = Z_NEXT(p->val);
697 		if (idx == HT_INVALID_IDX) {
698 			return NULL;
699 		}
700 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
701 		if (p->key == key) { /* check for the same interned string */
702 			return p;
703 		}
704 	}
705 }
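/* Lookup mechanics: nTableMask is stored as a negative number, so
 * (ZSTR_H(key) | nTableMask) yields a negative index and HT_HASH_EX() reads a
 * hash slot located before arData in the same allocation.  That slot holds the
 * index of the first bucket in the collision chain; Z_NEXT(p->val) links the
 * rest.  Comparing key pointers first is a cheap fast path for interned
 * strings, which typically share one zend_string per distinct content. */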
706 
707 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
708 {
709 	uint32_t nIndex;
710 	uint32_t idx;
711 	Bucket *p, *arData;
712 
713 	arData = ht->arData;
714 	nIndex = h | ht->nTableMask;
715 	idx = HT_HASH_EX(arData, nIndex);
716 	while (idx != HT_INVALID_IDX) {
717 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
718 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
719 		if ((p->h == h)
720 			 && p->key
721 			 && zend_string_equals_cstr(p->key, str, len)) {
722 			return p;
723 		}
724 		idx = Z_NEXT(p->val);
725 	}
726 	return NULL;
727 }
728 
729 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
730 {
731 	uint32_t nIndex;
732 	uint32_t idx;
733 	Bucket *p, *arData;
734 
735 	arData = ht->arData;
736 	nIndex = h | ht->nTableMask;
737 	idx = HT_HASH_EX(arData, nIndex);
738 	while (idx != HT_INVALID_IDX) {
739 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
740 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
741 		if (p->h == h && !p->key) {
742 			return p;
743 		}
744 		idx = Z_NEXT(p->val);
745 	}
746 	return NULL;
747 }
748 
749 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
750 {
751 	zend_ulong h;
752 	uint32_t nIndex;
753 	uint32_t idx;
754 	Bucket *p, *arData;
755 
756 	IS_CONSISTENT(ht);
757 	HT_ASSERT_RC1(ht);
758 	zend_string_hash_val(key);
759 
760 	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
761 		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
762 			zend_hash_real_init_mixed(ht);
763 			goto add_to_hash;
764 		} else {
765 			zend_hash_packed_to_hash(ht);
766 		}
767 	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
768 		p = zend_hash_find_bucket(ht, key);
769 
770 		if (p) {
771 			zval *data;
772 
773 			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
774 			if (flag & HASH_LOOKUP) {
775 				return &p->val;
776 			} else if (flag & HASH_ADD) {
777 				if (!(flag & HASH_UPDATE_INDIRECT)) {
778 					return NULL;
779 				}
780 				ZEND_ASSERT(&p->val != pData);
781 				data = &p->val;
782 				if (Z_TYPE_P(data) == IS_INDIRECT) {
783 					data = Z_INDIRECT_P(data);
784 					if (Z_TYPE_P(data) != IS_UNDEF) {
785 						return NULL;
786 					}
787 				} else {
788 					return NULL;
789 				}
790 			} else {
791 				ZEND_ASSERT(&p->val != pData);
792 				data = &p->val;
793 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
794 					data = Z_INDIRECT_P(data);
795 				}
796 			}
797 			if (ht->pDestructor) {
798 				ht->pDestructor(data);
799 			}
800 			ZVAL_COPY_VALUE(data, pData);
801 			return data;
802 		}
803 	}
804 
805 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
806 
807 add_to_hash:
808 	if (!ZSTR_IS_INTERNED(key)) {
809 		zend_string_addref(key);
810 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
811 	}
812 	idx = ht->nNumUsed++;
813 	ht->nNumOfElements++;
814 	arData = ht->arData;
815 	p = arData + idx;
816 	p->key = key;
817 	p->h = h = ZSTR_H(key);
818 	nIndex = h | ht->nTableMask;
819 	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
820 	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
821 	if (flag & HASH_LOOKUP) {
822 		ZVAL_NULL(&p->val);
823 	} else {
824 		ZVAL_COPY_VALUE(&p->val, pData);
825 	}
826 
827 	return &p->val;
828 }
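/* Flag semantics for the helper above: HASH_ADD returns NULL if the key
 * already exists, HASH_UPDATE overwrites the existing value after running the
 * destructor, HASH_ADD_NEW skips the duplicate lookup entirely,
 * HASH_UPDATE_INDIRECT follows IS_INDIRECT slots (e.g. in symbol tables), and
 * HASH_LOOKUP returns the existing value or inserts a NULL placeholder. */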
829 
830 static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
831 {
832 	zend_string *key;
833 	uint32_t nIndex;
834 	uint32_t idx;
835 	Bucket *p;
836 
837 	IS_CONSISTENT(ht);
838 	HT_ASSERT_RC1(ht);
839 
840 	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
841 		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
842 			zend_hash_real_init_mixed(ht);
843 			goto add_to_hash;
844 		} else {
845 			zend_hash_packed_to_hash(ht);
846 		}
847 	} else if ((flag & HASH_ADD_NEW) == 0) {
848 		p = zend_hash_str_find_bucket(ht, str, len, h);
849 
850 		if (p) {
851 			zval *data;
852 
853 			if (flag & HASH_LOOKUP) {
854 				return &p->val;
855 			} else if (flag & HASH_ADD) {
856 				if (!(flag & HASH_UPDATE_INDIRECT)) {
857 					return NULL;
858 				}
859 				ZEND_ASSERT(&p->val != pData);
860 				data = &p->val;
861 				if (Z_TYPE_P(data) == IS_INDIRECT) {
862 					data = Z_INDIRECT_P(data);
863 					if (Z_TYPE_P(data) != IS_UNDEF) {
864 						return NULL;
865 					}
866 				} else {
867 					return NULL;
868 				}
869 			} else {
870 				ZEND_ASSERT(&p->val != pData);
871 				data = &p->val;
872 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
873 					data = Z_INDIRECT_P(data);
874 				}
875 			}
876 			if (ht->pDestructor) {
877 				ht->pDestructor(data);
878 			}
879 			ZVAL_COPY_VALUE(data, pData);
880 			return data;
881 		}
882 	}
883 
884 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
885 
886 add_to_hash:
887 	idx = ht->nNumUsed++;
888 	ht->nNumOfElements++;
889 	p = ht->arData + idx;
890 	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
891 #if ZEND_RC_DEBUG
892 	if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) {
893 		GC_MAKE_PERSISTENT_LOCAL(key);
894 	}
895 #endif
896 	p->h = ZSTR_H(key) = h;
897 	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
898 	if (flag & HASH_LOOKUP) {
899 		ZVAL_NULL(&p->val);
900 	} else {
901 		ZVAL_COPY_VALUE(&p->val, pData);
902 	}
903 	nIndex = h | ht->nTableMask;
904 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
905 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
906 
907 	return &p->val;
908 }
909 
910 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
911 {
912 	if (flag == HASH_ADD) {
913 		return zend_hash_add(ht, key, pData);
914 	} else if (flag == HASH_ADD_NEW) {
915 		return zend_hash_add_new(ht, key, pData);
916 	} else if (flag == HASH_UPDATE) {
917 		return zend_hash_update(ht, key, pData);
918 	} else {
919 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
920 		return zend_hash_update_ind(ht, key, pData);
921 	}
922 }
923 
924 ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
925 {
926 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
927 }
928 
929 ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
930 {
931 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
932 }
933 
934 ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
935 {
936 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
937 }
938 
939 ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
940 {
941 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
942 }
943 
944 ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
945 {
946 	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
947 }
948 
949 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
950 {
951 	if (flag == HASH_ADD) {
952 		return zend_hash_str_add(ht, str, len, pData);
953 	} else if (flag == HASH_ADD_NEW) {
954 		return zend_hash_str_add_new(ht, str, len, pData);
955 	} else if (flag == HASH_UPDATE) {
956 		return zend_hash_str_update(ht, str, len, pData);
957 	} else {
958 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
959 		return zend_hash_str_update_ind(ht, str, len, pData);
960 	}
961 }
962 
963 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
964 {
965 	zend_ulong h = zend_hash_func(str, len);
966 
967 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
968 }
969 
970 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
971 {
972 	zend_ulong h = zend_hash_func(str, len);
973 
974 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
975 }
976 
977 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
978 {
979 	zend_ulong h = zend_hash_func(str, len);
980 
981 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
982 }
983 
984 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
985 {
986 	zend_ulong h = zend_hash_func(str, len);
987 
988 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
989 }
990 
991 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
992 {
993 	zval dummy;
994 
995 	ZVAL_NULL(&dummy);
996 	return zend_hash_index_add(ht, h, &dummy);
997 }
998 
999 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
1000 {
1001 	zval dummy;
1002 
1003 	ZVAL_NULL(&dummy);
1004 	return zend_hash_add(ht, key, &dummy);
1005 }
1006 
1007 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
1008 {
1009 	zval dummy;
1010 
1011 	ZVAL_NULL(&dummy);
1012 	return zend_hash_str_add(ht, str, len, &dummy);
1013 }
1014 
1015 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1016 {
1017 	uint32_t nIndex;
1018 	uint32_t idx;
1019 	Bucket *p;
1020 	zval *zv;
1021 
1022 	IS_CONSISTENT(ht);
1023 	HT_ASSERT_RC1(ht);
1024 
1025 	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
1026 		h = 0;
1027 	}
1028 
1029 	if (HT_IS_PACKED(ht)) {
1030 		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
1031 		 && h < ht->nNumUsed) {
1032 			zv = ht->arPacked + h;
1033 			if (Z_TYPE_P(zv) != IS_UNDEF) {
1034 				if (flag & HASH_LOOKUP) {
1035 					return zv;
1036 				}
1037 replace:
1038 				if (flag & HASH_ADD) {
1039 					return NULL;
1040 				}
1041 				if (ht->pDestructor) {
1042 					ht->pDestructor(zv);
1043 				}
1044 				ZVAL_COPY_VALUE(zv, pData);
1045 				return zv;
1046 			} else { /* we have to keep the order :( */
1047 				goto convert_to_hash;
1048 			}
1049 		} else if (EXPECTED(h < ht->nTableSize)) {
1050 add_to_packed:
1051 			zv = ht->arPacked + h;
1052 			/* incremental initialization of empty Buckets */
1053 			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
1054 				if (h > ht->nNumUsed) {
1055 					zval *q = ht->arPacked + ht->nNumUsed;
1056 					while (q != zv) {
1057 						ZVAL_UNDEF(q);
1058 						q++;
1059 					}
1060 				}
1061 			}
1062 			ht->nNextFreeElement = ht->nNumUsed = h + 1;
1063 			ht->nNumOfElements++;
1064 			if (flag & HASH_LOOKUP) {
1065 				ZVAL_NULL(zv);
1066 			} else {
1067 				ZVAL_COPY_VALUE(zv, pData);
1068 			}
1069 
1070 			return zv;
1071 		} else if ((h >> 1) < ht->nTableSize &&
1072 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
1073 			zend_hash_packed_grow(ht);
1074 			goto add_to_packed;
1075 		} else {
1076 			if (ht->nNumUsed >= ht->nTableSize) {
1077 				ht->nTableSize += ht->nTableSize;
1078 			}
1079 convert_to_hash:
1080 			zend_hash_packed_to_hash(ht);
1081 		}
1082 	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
1083 		if (h < ht->nTableSize) {
1084 			zend_hash_real_init_packed_ex(ht);
1085 			goto add_to_packed;
1086 		}
1087 		zend_hash_real_init_mixed(ht);
1088 	} else {
1089 		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
1090 			p = zend_hash_index_find_bucket(ht, h);
1091 			if (p) {
1092 				if (flag & HASH_LOOKUP) {
1093 					return &p->val;
1094 				}
1095 				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
1096 				zv = &p->val;
1097 				goto replace;
1098 			}
1099 		}
1100 		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
1101 	}
1102 
1103 	idx = ht->nNumUsed++;
1104 	nIndex = h | ht->nTableMask;
1105 	p = ht->arData + idx;
1106 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1107 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
1108 	if ((zend_long)h >= ht->nNextFreeElement) {
1109 		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
1110 	}
1111 	ht->nNumOfElements++;
1112 	p->h = h;
1113 	p->key = NULL;
1114 	if (flag & HASH_LOOKUP) {
1115 		ZVAL_NULL(&p->val);
1116 	} else {
1117 		ZVAL_COPY_VALUE(&p->val, pData);
1118 	}
1119 
1120 	return &p->val;
1121 }
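/* Integer-key insertion tries to keep the table packed: an index below
 * nTableSize is stored in place (filling any skipped slots with UNDEF), an
 * index below twice nTableSize still grows the packed array provided the
 * table is more than half full, and anything sparser falls back to
 * zend_hash_packed_to_hash() and the generic bucket path below. */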
1122 
1123 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1124 {
1125 	if (flag == HASH_ADD) {
1126 		return zend_hash_index_add(ht, h, pData);
1127 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1128 		return zend_hash_index_add_new(ht, h, pData);
1129 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1130 		ZEND_ASSERT(h == ht->nNextFreeElement);
1131 		return zend_hash_next_index_insert(ht, pData);
1132 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1133 		ZEND_ASSERT(h == ht->nNextFreeElement);
1134 		return zend_hash_next_index_insert_new(ht, pData);
1135 	} else {
1136 		ZEND_ASSERT(flag == HASH_UPDATE);
1137 		return zend_hash_index_update(ht, h, pData);
1138 	}
1139 }
1140 
1141 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
1142 {
1143 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
1144 }
1145 
1146 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
1147 {
1148 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
1149 }
1150 
1151 ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
1152 {
1153 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
1154 }
1155 
1156 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
1157 {
1158 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
1159 }
1160 
1161 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
1162 {
1163 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
1164 }
1165 
1166 ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
1167 {
1168 	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
1169 }
1170 
1171 ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
1172 {
1173 	uint32_t nIndex;
1174 	uint32_t idx, i;
1175 	Bucket *p, *arData;
1176 
1177 	IS_CONSISTENT(ht);
1178 	HT_ASSERT_RC1(ht);
1179 	ZEND_ASSERT(!HT_IS_PACKED(ht));
1180 
1181 	(void)zend_string_hash_val(key);
1182 	p = zend_hash_find_bucket(ht, key);
1183 	if (UNEXPECTED(p)) {
1184 		return (p == b) ? &p->val : NULL;
1185 	}
1186 
1187 	if (!ZSTR_IS_INTERNED(key)) {
1188 		zend_string_addref(key);
1189 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
1190 	}
1191 
1192 	arData = ht->arData;
1193 
1194 	/* del from hash */
1195 	idx = HT_IDX_TO_HASH(b - arData);
1196 	nIndex = b->h | ht->nTableMask;
1197 	i = HT_HASH_EX(arData, nIndex);
1198 	if (i == idx) {
1199 		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
1200 	} else {
1201 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1202 		while (Z_NEXT(p->val) != idx) {
1203 			i = Z_NEXT(p->val);
1204 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1205 		}
1206 		Z_NEXT(p->val) = Z_NEXT(b->val);
1207 	}
1208 	zend_string_release(b->key);
1209 
1210 	/* add to hash */
1211 	idx = b - arData;
1212 	b->key = key;
1213 	b->h = ZSTR_H(key);
1214 	nIndex = b->h | ht->nTableMask;
1215 	idx = HT_IDX_TO_HASH(idx);
1216 	i = HT_HASH_EX(arData, nIndex);
1217 	if (i == HT_INVALID_IDX || i < idx) {
1218 		Z_NEXT(b->val) = i;
1219 		HT_HASH_EX(arData, nIndex) = idx;
1220 	} else {
1221 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1222 		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
1223 			i = Z_NEXT(p->val);
1224 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1225 		}
1226 		Z_NEXT(b->val) = Z_NEXT(p->val);
1227 		Z_NEXT(p->val) = idx;
1228 	}
1229 	return &b->val;
1230 }
1231 
1232 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
1233 {
1234 
1235 	IS_CONSISTENT(ht);
1236 	HT_ASSERT_RC1(ht);
1237 
1238 	ZEND_ASSERT(!HT_IS_PACKED(ht));
1239 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
1240 		zend_hash_rehash(ht);
1241 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
1242 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
1243 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
1244 		Bucket *old_buckets = ht->arData;
1245 
1246 		ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
1247 
1248 		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1249 		ht->nTableSize = nSize;
1250 		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
1251 		HT_SET_DATA_ADDR(ht, new_data);
1252 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
1253 		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1254 		zend_hash_rehash(ht);
1255 	} else {
1256 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
1257 	}
1258 }
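/* Resize policy: if the number of holes left by deletions (nNumUsed minus
 * nNumOfElements) exceeds about 1/32 of the live elements, the table is
 * compacted in place by zend_hash_rehash(); otherwise the bucket and hash
 * areas are doubled and all collision chains are rebuilt. */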
1259 
1260 ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
1261 {
1262 	Bucket *p;
1263 	uint32_t nIndex, i;
1264 
1265 	IS_CONSISTENT(ht);
1266 
1267 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
1268 		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1269 			ht->nNumUsed = 0;
1270 			HT_HASH_RESET(ht);
1271 		}
1272 		return;
1273 	}
1274 
1275 	HT_HASH_RESET(ht);
1276 	i = 0;
1277 	p = ht->arData;
1278 	if (HT_IS_WITHOUT_HOLES(ht)) {
1279 		do {
1280 			nIndex = p->h | ht->nTableMask;
1281 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1282 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1283 			p++;
1284 		} while (++i < ht->nNumUsed);
1285 	} else {
1286 		uint32_t old_num_used = ht->nNumUsed;
1287 		do {
1288 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
1289 				uint32_t j = i;
1290 				Bucket *q = p;
1291 
1292 				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
1293 					while (++i < ht->nNumUsed) {
1294 						p++;
1295 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1296 							ZVAL_COPY_VALUE(&q->val, &p->val);
1297 							q->h = p->h;
1298 							nIndex = q->h | ht->nTableMask;
1299 							q->key = p->key;
1300 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1301 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1302 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1303 								ht->nInternalPointer = j;
1304 							}
1305 							q++;
1306 							j++;
1307 						}
1308 					}
1309 				} else {
1310 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1);
1311 
1312 					while (++i < ht->nNumUsed) {
1313 						p++;
1314 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1315 							ZVAL_COPY_VALUE(&q->val, &p->val);
1316 							q->h = p->h;
1317 							nIndex = q->h | ht->nTableMask;
1318 							q->key = p->key;
1319 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1320 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1321 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1322 								ht->nInternalPointer = j;
1323 							}
1324 							if (UNEXPECTED(i >= iter_pos)) {
1325 								do {
1326 									zend_hash_iterators_update(ht, iter_pos, j);
1327 									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
1328 								} while (iter_pos < i);
1329 							}
1330 							q++;
1331 							j++;
1332 						}
1333 					}
1334 				}
1335 				ht->nNumUsed = j;
1336 				break;
1337 			}
1338 			nIndex = p->h | ht->nTableMask;
1339 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1340 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1341 			p++;
1342 		} while (++i < ht->nNumUsed);
1343 
1344 		/* Migrate iterators pointing one past the end of the old array to one past the end of the
1345 		 * compacted array, so that newly inserted elements are picked up correctly. */
1346 		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1347 			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
1348 		}
1349 	}
1350 }
1351 
1352 static zend_always_inline void _zend_hash_packed_del_val(HashTable *ht, uint32_t idx, zval *zv)
1353 {
1354 	idx = HT_HASH_TO_IDX(idx);
1355 	ht->nNumOfElements--;
1356 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1357 		uint32_t new_idx;
1358 
1359 		new_idx = idx;
1360 		while (1) {
1361 			new_idx++;
1362 			if (new_idx >= ht->nNumUsed) {
1363 				break;
1364 			} else if (Z_TYPE(ht->arPacked[new_idx]) != IS_UNDEF) {
1365 				break;
1366 			}
1367 		}
1368 		if (ht->nInternalPointer == idx) {
1369 			ht->nInternalPointer = new_idx;
1370 		}
1371 		zend_hash_iterators_update(ht, idx, new_idx);
1372 	}
1373 	if (ht->nNumUsed - 1 == idx) {
1374 		do {
1375 			ht->nNumUsed--;
1376 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arPacked[ht->nNumUsed-1]) == IS_UNDEF)));
1377 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1378 	}
1379 	if (ht->pDestructor) {
1380 		zval tmp;
1381 		ZVAL_COPY_VALUE(&tmp, zv);
1382 		ZVAL_UNDEF(zv);
1383 		ht->pDestructor(&tmp);
1384 	} else {
1385 		ZVAL_UNDEF(zv);
1386 	}
1387 }
1388 
1389 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
1390 {
1391 	if (prev) {
1392 		Z_NEXT(prev->val) = Z_NEXT(p->val);
1393 	} else {
1394 		HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
1395 	}
1396 	idx = HT_HASH_TO_IDX(idx);
1397 	ht->nNumOfElements--;
1398 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1399 		uint32_t new_idx;
1400 
1401 		new_idx = idx;
1402 		while (1) {
1403 			new_idx++;
1404 			if (new_idx >= ht->nNumUsed) {
1405 				break;
1406 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1407 				break;
1408 			}
1409 		}
1410 		if (ht->nInternalPointer == idx) {
1411 			ht->nInternalPointer = new_idx;
1412 		}
1413 		zend_hash_iterators_update(ht, idx, new_idx);
1414 	}
1415 	if (ht->nNumUsed - 1 == idx) {
1416 		do {
1417 			ht->nNumUsed--;
1418 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
1419 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1420 	}
1421 	if (ht->pDestructor) {
1422 		zval tmp;
1423 		ZVAL_COPY_VALUE(&tmp, &p->val);
1424 		ZVAL_UNDEF(&p->val);
1425 		ht->pDestructor(&tmp);
1426 	} else {
1427 		ZVAL_UNDEF(&p->val);
1428 	}
1429 }
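/* Deletion unlinks the bucket from its collision chain (via prev, or the hash
 * slot itself when it is the chain head), marks the value IS_UNDEF, and only
 * shrinks nNumUsed when the deleted entry sits at the end of the bucket array;
 * interior deletions leave holes that are compacted later by
 * zend_hash_rehash(). */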
1430 
1431 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1432 {
1433 	Bucket *prev = NULL;
1434 	uint32_t nIndex;
1435 	uint32_t i;
1436 
1437 	nIndex = p->h | ht->nTableMask;
1438 	i = HT_HASH(ht, nIndex);
1439 
1440 	if (i != idx) {
1441 		prev = HT_HASH_TO_BUCKET(ht, i);
1442 		while (Z_NEXT(prev->val) != idx) {
1443 			i = Z_NEXT(prev->val);
1444 			prev = HT_HASH_TO_BUCKET(ht, i);
1445 		}
1446 	}
1447 
1448 	if (p->key) {
1449 		zend_string_release(p->key);
1450 		p->key = NULL;
1451 	}
1452 	_zend_hash_del_el_ex(ht, idx, p, prev);
1453 }
1454 
1455 ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv)
1456 {
1457 	IS_CONSISTENT(ht);
1458 	HT_ASSERT_RC1(ht);
1459 	ZEND_ASSERT(HT_IS_PACKED(ht));
1460 	_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(zv - ht->arPacked), zv);
1461 }
1462 
1463 
1464 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1465 {
1466 	IS_CONSISTENT(ht);
1467 	HT_ASSERT_RC1(ht);
1468 	ZEND_ASSERT(!HT_IS_PACKED(ht));
1469 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1470 }
1471 
1472 ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1473 {
1474 	zend_ulong h;
1475 	uint32_t nIndex;
1476 	uint32_t idx;
1477 	Bucket *p;
1478 	Bucket *prev = NULL;
1479 
1480 	IS_CONSISTENT(ht);
1481 	HT_ASSERT_RC1(ht);
1482 
1483 	h = zend_string_hash_val(key);
1484 	nIndex = h | ht->nTableMask;
1485 
1486 	idx = HT_HASH(ht, nIndex);
1487 	while (idx != HT_INVALID_IDX) {
1488 		p = HT_HASH_TO_BUCKET(ht, idx);
1489 		if ((p->key == key) ||
1490 			(p->h == h &&
1491 		     p->key &&
1492 		     zend_string_equal_content(p->key, key))) {
1493 			zend_string_release(p->key);
1494 			p->key = NULL;
1495 			_zend_hash_del_el_ex(ht, idx, p, prev);
1496 			return SUCCESS;
1497 		}
1498 		prev = p;
1499 		idx = Z_NEXT(p->val);
1500 	}
1501 	return FAILURE;
1502 }
1503 
1504 ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1505 {
1506 	zend_ulong h;
1507 	uint32_t nIndex;
1508 	uint32_t idx;
1509 	Bucket *p;
1510 	Bucket *prev = NULL;
1511 
1512 	IS_CONSISTENT(ht);
1513 	HT_ASSERT_RC1(ht);
1514 
1515 	h = zend_string_hash_val(key);
1516 	nIndex = h | ht->nTableMask;
1517 
1518 	idx = HT_HASH(ht, nIndex);
1519 	while (idx != HT_INVALID_IDX) {
1520 		p = HT_HASH_TO_BUCKET(ht, idx);
1521 		if ((p->key == key) ||
1522 			(p->h == h &&
1523 		     p->key &&
1524 		     zend_string_equal_content(p->key, key))) {
1525 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1526 				zval *data = Z_INDIRECT(p->val);
1527 
1528 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1529 					return FAILURE;
1530 				} else {
1531 					if (ht->pDestructor) {
1532 						zval tmp;
1533 						ZVAL_COPY_VALUE(&tmp, data);
1534 						ZVAL_UNDEF(data);
1535 						ht->pDestructor(&tmp);
1536 					} else {
1537 						ZVAL_UNDEF(data);
1538 					}
1539 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1540 				}
1541 			} else {
1542 				zend_string_release(p->key);
1543 				p->key = NULL;
1544 				_zend_hash_del_el_ex(ht, idx, p, prev);
1545 			}
1546 			return SUCCESS;
1547 		}
1548 		prev = p;
1549 		idx = Z_NEXT(p->val);
1550 	}
1551 	return FAILURE;
1552 }
1553 
1554 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1555 {
1556 	zend_ulong h;
1557 	uint32_t nIndex;
1558 	uint32_t idx;
1559 	Bucket *p;
1560 	Bucket *prev = NULL;
1561 
1562 	IS_CONSISTENT(ht);
1563 	HT_ASSERT_RC1(ht);
1564 
1565 	h = zend_inline_hash_func(str, len);
1566 	nIndex = h | ht->nTableMask;
1567 
1568 	idx = HT_HASH(ht, nIndex);
1569 	while (idx != HT_INVALID_IDX) {
1570 		p = HT_HASH_TO_BUCKET(ht, idx);
1571 		if ((p->h == h)
1572 			 && p->key
1573 			 && zend_string_equals_cstr(p->key, str, len)) {
1574 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1575 				zval *data = Z_INDIRECT(p->val);
1576 
1577 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1578 					return FAILURE;
1579 				} else {
1580 					if (ht->pDestructor) {
1581 						ht->pDestructor(data);
1582 					}
1583 					ZVAL_UNDEF(data);
1584 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1585 				}
1586 			} else {
1587 				zend_string_release(p->key);
1588 				p->key = NULL;
1589 				_zend_hash_del_el_ex(ht, idx, p, prev);
1590 			}
1591 			return SUCCESS;
1592 		}
1593 		prev = p;
1594 		idx = Z_NEXT(p->val);
1595 	}
1596 	return FAILURE;
1597 }
1598 
1599 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1600 {
1601 	zend_ulong h;
1602 	uint32_t nIndex;
1603 	uint32_t idx;
1604 	Bucket *p;
1605 	Bucket *prev = NULL;
1606 
1607 	IS_CONSISTENT(ht);
1608 	HT_ASSERT_RC1(ht);
1609 
1610 	h = zend_inline_hash_func(str, len);
1611 	nIndex = h | ht->nTableMask;
1612 
1613 	idx = HT_HASH(ht, nIndex);
1614 	while (idx != HT_INVALID_IDX) {
1615 		p = HT_HASH_TO_BUCKET(ht, idx);
1616 		if ((p->h == h)
1617 			 && p->key
1618 			 && zend_string_equals_cstr(p->key, str, len)) {
1619 			zend_string_release(p->key);
1620 			p->key = NULL;
1621 			_zend_hash_del_el_ex(ht, idx, p, prev);
1622 			return SUCCESS;
1623 		}
1624 		prev = p;
1625 		idx = Z_NEXT(p->val);
1626 	}
1627 	return FAILURE;
1628 }
1629 
1630 ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1631 {
1632 	uint32_t nIndex;
1633 	uint32_t idx;
1634 	Bucket *p;
1635 	Bucket *prev = NULL;
1636 
1637 	IS_CONSISTENT(ht);
1638 	HT_ASSERT_RC1(ht);
1639 
1640 	if (HT_IS_PACKED(ht)) {
1641 		if (h < ht->nNumUsed) {
1642 			zval *zv = ht->arPacked + h;
1643 			if (Z_TYPE_P(zv) != IS_UNDEF) {
1644 				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(h), zv);
1645 				return SUCCESS;
1646 			}
1647 		}
1648 		return FAILURE;
1649 	}
1650 	nIndex = h | ht->nTableMask;
1651 
1652 	idx = HT_HASH(ht, nIndex);
1653 	while (idx != HT_INVALID_IDX) {
1654 		p = HT_HASH_TO_BUCKET(ht, idx);
1655 		if ((p->h == h) && (p->key == NULL)) {
1656 			_zend_hash_del_el_ex(ht, idx, p, prev);
1657 			return SUCCESS;
1658 		}
1659 		prev = p;
1660 		idx = Z_NEXT(p->val);
1661 	}
1662 	return FAILURE;
1663 }
1664 
1665 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1666 {
1667 	IS_CONSISTENT(ht);
1668 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1669 
1670 	if (ht->nNumUsed) {
1671 		if (HT_IS_PACKED(ht)) {
1672 			if (ht->pDestructor) {
1673 				zval *zv = ht->arPacked;
1674 				zval *end = zv + ht->nNumUsed;
1675 
1676 				SET_INCONSISTENT(HT_IS_DESTROYING);
1677 				if (HT_IS_WITHOUT_HOLES(ht)) {
1678 					do {
1679 						ht->pDestructor(zv);
1680 					} while (++zv != end);
1681 				} else {
1682 					do {
1683 						if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
1684 							ht->pDestructor(zv);
1685 						}
1686 					} while (++zv != end);
1687 				}
1688 				SET_INCONSISTENT(HT_DESTROYED);
1689 			}
1690 			zend_hash_iterators_remove(ht);
1691 		} else {
1692 			Bucket *p = ht->arData;
1693 			Bucket *end = p + ht->nNumUsed;
1694 
1695 			if (ht->pDestructor) {
1696 				SET_INCONSISTENT(HT_IS_DESTROYING);
1697 
1698 				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1699 					if (HT_IS_WITHOUT_HOLES(ht)) {
1700 						do {
1701 							ht->pDestructor(&p->val);
1702 						} while (++p != end);
1703 					} else {
1704 						do {
1705 							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1706 								ht->pDestructor(&p->val);
1707 							}
1708 						} while (++p != end);
1709 					}
1710 				} else if (HT_IS_WITHOUT_HOLES(ht)) {
1711 					do {
1712 						ht->pDestructor(&p->val);
1713 						if (EXPECTED(p->key)) {
1714 							zend_string_release(p->key);
1715 						}
1716 					} while (++p != end);
1717 				} else {
1718 					do {
1719 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1720 							ht->pDestructor(&p->val);
1721 							if (EXPECTED(p->key)) {
1722 								zend_string_release(p->key);
1723 							}
1724 						}
1725 					} while (++p != end);
1726 				}
1727 
1728 				SET_INCONSISTENT(HT_DESTROYED);
1729 			} else {
1730 				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1731 					do {
1732 						if (EXPECTED(p->key)) {
1733 							zend_string_release(p->key);
1734 						}
1735 					} while (++p != end);
1736 				}
1737 			}
1738 			zend_hash_iterators_remove(ht);
1739 		}
1740 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1741 		return;
1742 	}
1743 	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1744 }
1745 
1746 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1747 {
1748 	IS_CONSISTENT(ht);
1749 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1750 
1751 	/* break possible cycles */
1752 	GC_REMOVE_FROM_BUFFER(ht);
1753 	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;
1754 
1755 	if (ht->nNumUsed) {
1756 		/* In some rare cases destructors of regular arrays may be changed */
1757 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1758 			zend_hash_destroy(ht);
1759 			goto free_ht;
1760 		}
1761 
1762 		SET_INCONSISTENT(HT_IS_DESTROYING);
1763 
1764 		if (HT_IS_PACKED(ht)) {
1765 			zval *zv = ht->arPacked;
1766 			zval *end = zv + ht->nNumUsed;
1767 
1768 			do {
1769 				i_zval_ptr_dtor(zv);
1770 			} while (++zv != end);
1771 		} else {
1772 			Bucket *p = ht->arData;
1773 			Bucket *end = p + ht->nNumUsed;
1774 
1775 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1776 				do {
1777 					i_zval_ptr_dtor(&p->val);
1778 				} while (++p != end);
1779 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1780 				do {
1781 					i_zval_ptr_dtor(&p->val);
1782 					if (EXPECTED(p->key)) {
1783 						zend_string_release_ex(p->key, 0);
1784 					}
1785 				} while (++p != end);
1786 			} else {
1787 				do {
1788 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1789 						i_zval_ptr_dtor(&p->val);
1790 						if (EXPECTED(p->key)) {
1791 							zend_string_release_ex(p->key, 0);
1792 						}
1793 					}
1794 				} while (++p != end);
1795 			}
1796 		}
1797 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1798 		goto free_ht;
1799 	}
1800 	SET_INCONSISTENT(HT_DESTROYED);
1801 	efree(HT_GET_DATA_ADDR(ht));
1802 free_ht:
1803 	zend_hash_iterators_remove(ht);
1804 	FREE_HASHTABLE(ht);
1805 }
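/* A short summary sketch of the two destructors above (the variables are
 * assumptions of the example): zend_hash_destroy() frees only the bucket
 * storage and works for persistent or embedded tables, leaving the HashTable
 * struct itself to the caller; zend_array_destroy() additionally detaches the
 * table from the GC, takes a fast path for the default ZVAL_PTR_DTOR case and
 * frees the zend_array itself.
 *
 *     HashTable embedded;                  // e.g. a member of a larger struct
 *     zend_hash_init(&embedded, 8, NULL, ZVAL_PTR_DTOR, 0);
 *     // ...
 *     zend_hash_destroy(&embedded);        // storage freed, struct untouched
 *
 *     HashTable *arr = zend_new_array(8);  // heap-allocated, refcounted array
 *     // ...
 *     zend_array_destroy(arr);             // storage and zend_array freed
 */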
1806 
1807 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1808 {
1809 	IS_CONSISTENT(ht);
1810 	HT_ASSERT_RC1(ht);
1811 
1812 	if (ht->nNumUsed) {
1813 		if (HT_IS_PACKED(ht)) {
1814 			zval *zv = ht->arPacked;
1815 			zval *end = zv + ht->nNumUsed;
1816 
1817 			if (ht->pDestructor) {
1818 				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1819 					if (HT_IS_WITHOUT_HOLES(ht)) {
1820 						do {
1821 							ht->pDestructor(zv);
1822 						} while (++zv != end);
1823 					} else {
1824 						do {
1825 							if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
1826 								ht->pDestructor(zv);
1827 							}
1828 						} while (++zv != end);
1829 					}
1830 				}
1831 			}
1832 		} else {
1833 			Bucket *p = ht->arData;
1834 			Bucket *end = p + ht->nNumUsed;
1835 
1836 			if (ht->pDestructor) {
1837 				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1838 					if (HT_IS_WITHOUT_HOLES(ht)) {
1839 						do {
1840 							ht->pDestructor(&p->val);
1841 						} while (++p != end);
1842 					} else {
1843 						do {
1844 							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1845 								ht->pDestructor(&p->val);
1846 							}
1847 						} while (++p != end);
1848 					}
1849 				} else if (HT_IS_WITHOUT_HOLES(ht)) {
1850 					do {
1851 						ht->pDestructor(&p->val);
1852 						if (EXPECTED(p->key)) {
1853 							zend_string_release(p->key);
1854 						}
1855 					} while (++p != end);
1856 				} else {
1857 					do {
1858 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1859 							ht->pDestructor(&p->val);
1860 							if (EXPECTED(p->key)) {
1861 								zend_string_release(p->key);
1862 							}
1863 						}
1864 					} while (++p != end);
1865 				}
1866 			} else {
1867 				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1868 					do {
1869 						if (EXPECTED(p->key)) {
1870 							zend_string_release(p->key);
1871 						}
1872 					} while (++p != end);
1873 				}
1874 			}
1875 			HT_HASH_RESET(ht);
1876 		}
1877 	}
1878 	ht->nNumUsed = 0;
1879 	ht->nNumOfElements = 0;
1880 	ht->nNextFreeElement = ZEND_LONG_MIN;
1881 	ht->nInternalPointer = 0;
1882 }
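/* Clean-and-reuse sketch (keys and values are illustrative, ht is assumed to
 * be an initialized, non-shared table): zend_hash_clean() destroys every
 * element but keeps the allocated storage, so the table can be refilled
 * without reallocation.
 *
 *     zval v;
 *     ZVAL_LONG(&v, 7);
 *     zend_hash_str_update(ht, "a", 1, &v);
 *     zend_hash_clean(ht);                   // now empty, storage kept
 *     zend_hash_str_update(ht, "b", 1, &v);  // reuses the same data block
 */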
1883 
1884 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1885 {
1886 	Bucket *p, *end;
1887 
1888 	IS_CONSISTENT(ht);
1889 	HT_ASSERT_RC1(ht);
1890 
1891 	if (ht->nNumUsed) {
1892 		ZEND_ASSERT(!HT_IS_PACKED(ht));
1893 		p = ht->arData;
1894 		end = p + ht->nNumUsed;
1895 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1896 			do {
1897 				i_zval_ptr_dtor(&p->val);
1898 			} while (++p != end);
1899 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1900 			do {
1901 				i_zval_ptr_dtor(&p->val);
1902 				if (EXPECTED(p->key)) {
1903 					zend_string_release(p->key);
1904 				}
1905 			} while (++p != end);
1906 		} else {
1907 			do {
1908 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1909 					i_zval_ptr_dtor(&p->val);
1910 					if (EXPECTED(p->key)) {
1911 						zend_string_release(p->key);
1912 					}
1913 				}
1914 			} while (++p != end);
1915 		}
1916 		HT_HASH_RESET(ht);
1917 	}
1918 	ht->nNumUsed = 0;
1919 	ht->nNumOfElements = 0;
1920 	ht->nNextFreeElement = ZEND_LONG_MIN;
1921 	ht->nInternalPointer = 0;
1922 }
1923 
1924 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1925 {
1926 	uint32_t idx;
1927 
1928 	IS_CONSISTENT(ht);
1929 	HT_ASSERT_RC1(ht);
1930 
1931 	if (HT_IS_PACKED(ht)) {
1932 		zval *zv = ht->arPacked;
1933 
1934 		for (idx = 0; idx < ht->nNumUsed; idx++, zv++) {
1935 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
1936 			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
1937 		}
1938 	} else {
1939 		Bucket *p = ht->arData;
1940 
1941 		for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1942 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1943 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1944 		}
1945 	}
1946 	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1947 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1948 	}
1949 
1950 	SET_INCONSISTENT(HT_DESTROYED);
1951 }
1952 
1953 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1954 {
1955 	uint32_t idx;
1956 
1957 	IS_CONSISTENT(ht);
1958 	HT_ASSERT_RC1(ht);
1959 
1960 	idx = ht->nNumUsed;
1961 	if (HT_IS_PACKED(ht)) {
1962 		zval *zv = ht->arPacked + ht->nNumUsed;
1963 
1964 		while (idx > 0) {
1965 			idx--;
1966 			zv--;
1967 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
1968 			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
1969 		}
1970 	} else {
1971 		Bucket *p = ht->arData + ht->nNumUsed;
1972 
1973 		while (idx > 0) {
1974 			idx--;
1975 			p--;
1976 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1977 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1978 		}
1979 	}
1980 
1981 	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1982 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1983 	}
1984 
1985 	SET_INCONSISTENT(HT_DESTROYED);
1986 }
1987 
1988 /* This is used to iterate over the elements of a hashtable and selectively
1989  * delete certain entries. apply_func() receives the data and decides whether
1990  * the entry should be deleted or whether iteration should be stopped. The
1991  * following three return codes are possible:
1992  * ZEND_HASH_APPLY_KEEP   - keep the element and continue
1993  * ZEND_HASH_APPLY_STOP   - stop iteration
1994  * ZEND_HASH_APPLY_REMOVE - delete the element; may be combined with ZEND_HASH_APPLY_STOP
1995  */
1996 
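/* For illustration, a callback of the apply_func_t shape described above
 * (the function name and its "drop NULL entries" policy are invented for
 * this sketch):
 *
 *     static int drop_null_entries(zval *zv)
 *     {
 *         if (Z_TYPE_P(zv) == IS_NULL) {
 *             return ZEND_HASH_APPLY_REMOVE;   // delete and continue
 *         }
 *         return ZEND_HASH_APPLY_KEEP;         // keep and continue
 *     }
 *
 *     // zend_hash_apply(ht, drop_null_entries);
 */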
1997 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1998 {
1999 	uint32_t idx;
2000 	int result;
2001 
2002 	IS_CONSISTENT(ht);
2003 	if (HT_IS_PACKED(ht)) {
2004 		for (idx = 0; idx < ht->nNumUsed; idx++) {
2005 			zval *zv = ht->arPacked + idx;
2006 
2007 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
2008 			result = apply_func(zv);
2009 
2010 			if (result & ZEND_HASH_APPLY_REMOVE) {
2011 				HT_ASSERT_RC1(ht);
2012 				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
2013 			}
2014 			if (result & ZEND_HASH_APPLY_STOP) {
2015 				break;
2016 			}
2017 		}
2018 	} else {
2019 		for (idx = 0; idx < ht->nNumUsed; idx++) {
2020 			Bucket *p = ht->arData + idx;
2021 
2022 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2023 			result = apply_func(&p->val);
2024 
2025 			if (result & ZEND_HASH_APPLY_REMOVE) {
2026 				HT_ASSERT_RC1(ht);
2027 				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
2028 			}
2029 			if (result & ZEND_HASH_APPLY_STOP) {
2030 				break;
2031 			}
2032 		}
2033 	}
2034 }
2035 
2036 
2037 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
2038 {
2039 	uint32_t idx;
2040 	int result;
2041 
2042 	IS_CONSISTENT(ht);
2043 	if (HT_IS_PACKED(ht)) {
2044 		for (idx = 0; idx < ht->nNumUsed; idx++) {
2045 			zval *zv = ht->arPacked + idx;
2046 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
2047 			result = apply_func(zv, argument);
2048 
2049 			if (result & ZEND_HASH_APPLY_REMOVE) {
2050 				HT_ASSERT_RC1(ht);
2051 				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
2052 			}
2053 			if (result & ZEND_HASH_APPLY_STOP) {
2054 				break;
2055 			}
2056 		}
2057 	} else {
2058 		for (idx = 0; idx < ht->nNumUsed; idx++) {
2059 			Bucket *p = ht->arData + idx;
2060 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2061 			result = apply_func(&p->val, argument);
2062 
2063 			if (result & ZEND_HASH_APPLY_REMOVE) {
2064 				HT_ASSERT_RC1(ht);
2065 				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
2066 			}
2067 			if (result & ZEND_HASH_APPLY_STOP) {
2068 				break;
2069 			}
2070 		}
2071 	}
2072 }
2073 
2074 
2075 ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
2076 {
2077 	uint32_t idx;
2078 	va_list args;
2079 	zend_hash_key hash_key;
2080 	int result;
2081 
2082 	IS_CONSISTENT(ht);
2083 
2084 	if (HT_IS_PACKED(ht)) {
2085 		for (idx = 0; idx < ht->nNumUsed; idx++) {
2086 			zval *zv = ht->arPacked + idx;
2087 
2088 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
2089 			va_start(args, num_args);
2090 			hash_key.h = idx;
2091 			hash_key.key = NULL;
2092 
2093 			result = apply_func(zv, num_args, args, &hash_key);
2094 
2095 			if (result & ZEND_HASH_APPLY_REMOVE) {
2096 				HT_ASSERT_RC1(ht);
2097 				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
2098 			}
2099 			if (result & ZEND_HASH_APPLY_STOP) {
2100 				va_end(args);
2101 				break;
2102 			}
2103 			va_end(args);
2104 		}
2105 	} else {
2106 		for (idx = 0; idx < ht->nNumUsed; idx++) {
2107 			Bucket *p = ht->arData + idx;
2108 
2109 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2110 			va_start(args, num_args);
2111 			hash_key.h = p->h;
2112 			hash_key.key = p->key;
2113 
2114 			result = apply_func(&p->val, num_args, args, &hash_key);
2115 
2116 			if (result & ZEND_HASH_APPLY_REMOVE) {
2117 				HT_ASSERT_RC1(ht);
2118 				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
2119 			}
2120 			if (result & ZEND_HASH_APPLY_STOP) {
2121 				va_end(args);
2122 				break;
2123 			}
2124 			va_end(args);
2125 		}
2126 	}
2127 }
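/* Illustrative callback matching apply_func_args_t (names and the extra
 * argument are invented for this sketch); the zend_hash_key argument tells
 * the callback whether the element has a string or a numeric key:
 *
 *     static int dump_entry(zval *zv, int num_args, va_list args, zend_hash_key *hash_key)
 *     {
 *         const char *prefix = va_arg(args, const char *);  // 1st extra arg
 *         if (hash_key->key) {
 *             php_printf("%s %s\n", prefix, ZSTR_VAL(hash_key->key));
 *         } else {
 *             php_printf("%s " ZEND_ULONG_FMT "\n", prefix, hash_key->h);
 *         }
 *         (void)zv; (void)num_args;
 *         return ZEND_HASH_APPLY_KEEP;
 *     }
 *
 *     // zend_hash_apply_with_arguments(ht, dump_entry, 1, "key:");
 */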
2128 
2129 
2130 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
2131 {
2132 	uint32_t idx;
2133 	int result;
2134 
2135 	IS_CONSISTENT(ht);
2136 
2137 	idx = ht->nNumUsed;
2138 	if (HT_IS_PACKED(ht)) {
2139 		zval *zv;
2140 
2141 		while (idx > 0) {
2142 			idx--;
2143 			zv = ht->arPacked + idx;
2144 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
2145 
2146 			result = apply_func(zv);
2147 
2148 			if (result & ZEND_HASH_APPLY_REMOVE) {
2149 				HT_ASSERT_RC1(ht);
2150 				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
2151 			}
2152 			if (result & ZEND_HASH_APPLY_STOP) {
2153 				break;
2154 			}
2155 		}
2156 	} else {
2157 		Bucket *p;
2158 
2159 		while (idx > 0) {
2160 			idx--;
2161 			p = ht->arData + idx;
2162 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2163 
2164 			result = apply_func(&p->val);
2165 
2166 			if (result & ZEND_HASH_APPLY_REMOVE) {
2167 				HT_ASSERT_RC1(ht);
2168 				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
2169 			}
2170 			if (result & ZEND_HASH_APPLY_STOP) {
2171 				break;
2172 			}
2173 		}
2174 	}
2175 }
2176 
2177 
2178 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
2179 {
2180 	uint32_t idx;
2181 	zval *new_entry, *data;
2182 
2183 	IS_CONSISTENT(source);
2184 	IS_CONSISTENT(target);
2185 	HT_ASSERT_RC1(target);
2186 
2187 	if (HT_IS_PACKED(source)) {
2188 		for (idx = 0; idx < source->nNumUsed; idx++) {
2189 			zval *zv = source->arPacked + idx;
2190 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
2191 
2192 			new_entry = zend_hash_index_update(target, idx, zv);
2193 			if (pCopyConstructor) {
2194 				pCopyConstructor(new_entry);
2195 			}
2196 		}
2197 		return;
2198 	}
2199 
2200 	for (idx = 0; idx < source->nNumUsed; idx++) {
2201 		Bucket *p = source->arData + idx;
2202 
2203 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2204 
2205 		/* INDIRECT element may point to UNDEF-ined slots */
2206 		data = &p->val;
2207 		if (Z_TYPE_P(data) == IS_INDIRECT) {
2208 			data = Z_INDIRECT_P(data);
2209 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
2210 				continue;
2211 			}
2212 		}
2213 		if (p->key) {
2214 			new_entry = zend_hash_update(target, p->key, data);
2215 		} else {
2216 			new_entry = zend_hash_index_update(target, p->h, data);
2217 		}
2218 		if (pCopyConstructor) {
2219 			pCopyConstructor(new_entry);
2220 		}
2221 	}
2222 }
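/* Sketch of a typical call (the destination table and the choice of copy
 * constructor are assumptions of the example): zend_hash_copy() inserts every
 * source element into target, and the copy constructor then fixes up
 * ownership of the freshly stored zval:
 *
 *     HashTable *dst = zend_new_array(zend_hash_num_elements(src));
 *     zend_hash_copy(dst, src, zval_add_ref);   // addref each copied value
 */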
2223 
2224 
2225 static zend_always_inline bool zend_array_dup_value(HashTable *source, HashTable *target, zval *data, zval *dest, bool packed, bool with_holes)
2226 {
2227 	if (with_holes) {
2228 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
2229 			data = Z_INDIRECT_P(data);
2230 		}
2231 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
2232 			return 0;
2233 		}
2234 	} else if (!packed) {
2235 		/* INDIRECT element may point to UNDEF-ined slots */
2236 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
2237 			data = Z_INDIRECT_P(data);
2238 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
2239 				return 0;
2240 			}
2241 		}
2242 	}
2243 
2244 	do {
2245 		if (Z_OPT_REFCOUNTED_P(data)) {
2246 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
2247 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
2248 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
2249 				data = Z_REFVAL_P(data);
2250 				if (!Z_OPT_REFCOUNTED_P(data)) {
2251 					break;
2252 				}
2253 			}
2254 			Z_ADDREF_P(data);
2255 		}
2256 	} while (0);
2257 	ZVAL_COPY_VALUE(dest, data);
2258 
2259 	return 1;
2260 }
2261 
2262 static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
2263 {
2264 	if (!zend_array_dup_value(source, target, &p->val, &q->val, packed, with_holes)) {
2265 		return 0;
2266 	}
2267 
2268 	if (!packed) {
2269 		uint32_t nIndex;
2270 
2271 		q->h = p->h;
2272 		q->key = p->key;
2273 		if (!static_keys && q->key) {
2274 			zend_string_addref(q->key);
2275 		}
2276 
2277 		nIndex = q->h | target->nTableMask;
2278 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
2279 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
2280 	}
2281 	return 1;
2282 }
2283 
2284 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
2285 {
2286 	zval *p = source->arPacked;
2287 	zval *q = target->arPacked;
2288 	zval *end = p + source->nNumUsed;
2289 
2290 	do {
2291 		if (!zend_array_dup_value(source, target, p, q, 1, with_holes)) {
2292 			if (with_holes) {
2293 				ZVAL_UNDEF(q);
2294 			}
2295 		}
2296 		p++; q++;
2297 	} while (p != end);
2298 }
2299 
2300 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
2301 {
2302 	uint32_t idx = 0;
2303 	Bucket *p = source->arData;
2304 	Bucket *q = target->arData;
2305 	Bucket *end = p + source->nNumUsed;
2306 
2307 	do {
2308 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
2309 			uint32_t target_idx = idx;
2310 
2311 			idx++; p++;
2312 			while (p != end) {
2313 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
2314 					if (source->nInternalPointer == idx) {
2315 						target->nInternalPointer = target_idx;
2316 					}
2317 					target_idx++; q++;
2318 				}
2319 				idx++; p++;
2320 			}
2321 			return target_idx;
2322 		}
2323 		idx++; p++; q++;
2324 	} while (p != end);
2325 	return idx;
2326 }
2327 
2328 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
2329 {
2330 	uint32_t idx;
2331 	HashTable *target;
2332 
2333 	IS_CONSISTENT(source);
2334 
2335 	ALLOC_HASHTABLE(target);
2336 	GC_SET_REFCOUNT(target, 1);
2337 	GC_TYPE_INFO(target) = GC_ARRAY;
2338 
2339 	target->pDestructor = ZVAL_PTR_DTOR;
2340 
2341 	if (source->nNumOfElements == 0) {
2342 		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
2343 		target->nTableMask = HT_MIN_MASK;
2344 		target->nNumUsed = 0;
2345 		target->nNumOfElements = 0;
2346 		target->nNextFreeElement = source->nNextFreeElement;
2347 		target->nInternalPointer = 0;
2348 		target->nTableSize = HT_MIN_SIZE;
2349 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
2350 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
2351 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2352 		target->nTableMask = source->nTableMask;
2353 		target->nNumUsed = source->nNumUsed;
2354 		target->nNumOfElements = source->nNumOfElements;
2355 		target->nNextFreeElement = source->nNextFreeElement;
2356 		target->nTableSize = source->nTableSize;
2357 		if (HT_IS_PACKED(source)) {
2358 			HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE(target)));
2359 			target->nInternalPointer = source->nInternalPointer;
2360 			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_PACKED_USED_SIZE(source));
2361 		} else {
2362 			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
2363 			target->nInternalPointer = source->nInternalPointer;
2364 			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
2365 		}
2366 	} else if (HT_IS_PACKED(source)) {
2367 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2368 		target->nTableMask = HT_MIN_MASK;
2369 		target->nNumUsed = source->nNumUsed;
2370 		target->nNumOfElements = source->nNumOfElements;
2371 		target->nNextFreeElement = source->nNextFreeElement;
2372 		target->nTableSize = source->nTableSize;
2373 		HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
2374 		target->nInternalPointer =
2375 			(source->nInternalPointer < source->nNumUsed) ?
2376 				source->nInternalPointer : 0;
2377 
2378 		HT_HASH_RESET_PACKED(target);
2379 
2380 		if (HT_IS_WITHOUT_HOLES(target)) {
2381 			zend_array_dup_packed_elements(source, target, 0);
2382 		} else {
2383 			zend_array_dup_packed_elements(source, target, 1);
2384 		}
2385 	} else {
2386 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2387 		target->nTableMask = source->nTableMask;
2388 		target->nNextFreeElement = source->nNextFreeElement;
2389 		target->nInternalPointer =
2390 			(source->nInternalPointer < source->nNumUsed) ?
2391 				source->nInternalPointer : 0;
2392 
2393 		target->nTableSize = source->nTableSize;
2394 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
2395 		HT_HASH_RESET(target);
2396 
2397 		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
2398 			if (HT_IS_WITHOUT_HOLES(source)) {
2399 				idx = zend_array_dup_elements(source, target, 1, 0);
2400 			} else {
2401 				idx = zend_array_dup_elements(source, target, 1, 1);
2402 			}
2403 		} else {
2404 			if (HT_IS_WITHOUT_HOLES(source)) {
2405 				idx = zend_array_dup_elements(source, target, 0, 0);
2406 			} else {
2407 				idx = zend_array_dup_elements(source, target, 0, 1);
2408 			}
2409 		}
2410 		target->nNumUsed = idx;
2411 		target->nNumOfElements = idx;
2412 	}
2413 	return target;
2414 }
2415 
2416 ZEND_API HashTable* zend_array_to_list(HashTable *source)
2417 {
2418 	HashTable *result = _zend_new_array(zend_hash_num_elements(source));
2419 	zend_hash_real_init_packed(result);
2420 
2421 	ZEND_HASH_FILL_PACKED(result) {
2422 		zval *entry;
2423 
2424 		ZEND_HASH_FOREACH_VAL(source, entry) {
2425 			if (UNEXPECTED(Z_ISREF_P(entry) && Z_REFCOUNT_P(entry) == 1)) {
2426 				entry = Z_REFVAL_P(entry);
2427 			}
2428 			Z_TRY_ADDREF_P(entry);
2429 			ZEND_HASH_FILL_ADD(entry);
2430 		} ZEND_HASH_FOREACH_END();
2431 	} ZEND_HASH_FILL_END();
2432 
2433 	return result;
2434 }
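/* Effect sketch (keys and values are illustrative): zend_array_to_list()
 * produces a fresh packed array of the values only, reindexed from 0, much
 * like userland array_values(); singly referenced references are unwrapped
 * and each kept value gets an extra reference.
 *
 *     // src: ["x" => 1, 7 => 2]   ->   list: [0 => 1, 1 => 2]
 *     HashTable *list = zend_array_to_list(src);
 */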
2435 
2436 
2437 ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
2438 {
2439 	uint32_t idx;
2440 	Bucket *p;
2441 	zval *t, *s;
2442 
2443 	IS_CONSISTENT(source);
2444 	IS_CONSISTENT(target);
2445 	HT_ASSERT_RC1(target);
2446 
2447 	if (overwrite) {
2448 		if (HT_IS_PACKED(source)) {
2449 			for (idx = 0; idx < source->nNumUsed; idx++) {
2450 				s = source->arPacked + idx;
2451 				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2452 					continue;
2453 				}
2454 				t = zend_hash_index_update(target, idx, s);
2455 				if (pCopyConstructor) {
2456 					pCopyConstructor(t);
2457 				}
2458 			}
2459 			return;
2460 		}
2461 
2462 		for (idx = 0; idx < source->nNumUsed; idx++) {
2463 			p = source->arData + idx;
2464 			s = &p->val;
2465 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2466 				s = Z_INDIRECT_P(s);
2467 			}
2468 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2469 				continue;
2470 			}
2471 			if (p->key) {
2472 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
2473 				if (pCopyConstructor) {
2474 					pCopyConstructor(t);
2475 				}
2476 			} else {
2477 				t = zend_hash_index_update(target, p->h, s);
2478 				if (pCopyConstructor) {
2479 					pCopyConstructor(t);
2480 				}
2481 			}
2482 		}
2483 	} else {
2484 		if (HT_IS_PACKED(source)) {
2485 			for (idx = 0; idx < source->nNumUsed; idx++) {
2486 				s = source->arPacked + idx;
2487 				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2488 					continue;
2489 				}
2490 				t = zend_hash_index_add(target, idx, s);
2491 				if (t && pCopyConstructor) {
2492 					pCopyConstructor(t);
2493 				}
2494 			}
2495 			return;
2496 		}
2497 
2498 		for (idx = 0; idx < source->nNumUsed; idx++) {
2499 			p = source->arData + idx;
2500 			s = &p->val;
2501 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2502 				s = Z_INDIRECT_P(s);
2503 			}
2504 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2505 				continue;
2506 			}
2507 			if (p->key) {
2508 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
2509 				if (t && pCopyConstructor) {
2510 					pCopyConstructor(t);
2511 				}
2512 			} else {
2513 				t = zend_hash_index_add(target, p->h, s);
2514 				if (t && pCopyConstructor) {
2515 					pCopyConstructor(t);
2516 				}
2517 			}
2518 		}
2519 	}
2520 }
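/* Usage sketch (the two tables and the copy constructor are assumptions of
 * the example): with overwrite=true existing target keys are replaced, with
 * overwrite=false they are preserved and only missing keys are added.
 *
 *     zend_hash_merge(target, source, zval_add_ref, 1);  // roughly array_replace()
 *     zend_hash_merge(target, source, zval_add_ref, 0);  // roughly the + operator
 */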
2521 
2522 
2523 static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, zend_ulong h, zend_string *key, void *pParam, merge_checker_func_t merge_checker_func)
2524 {
2525 	zend_hash_key hash_key;
2526 
2527 	hash_key.h = h;
2528 	hash_key.key = key;
2529 	return merge_checker_func(target, source_data, &hash_key, pParam);
2530 }
2531 
2532 
2533 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
2534 {
2535 	uint32_t idx;
2536 	Bucket *p;
2537 	zval *t;
2538 
2539 	IS_CONSISTENT(source);
2540 	IS_CONSISTENT(target);
2541 	HT_ASSERT_RC1(target);
2542 
2543 	ZEND_ASSERT(!HT_IS_PACKED(source));
2544 	for (idx = 0; idx < source->nNumUsed; idx++) {
2545 		p = source->arData + idx;
2546 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2547 		if (zend_hash_replace_checker_wrapper(target, &p->val, p->h, p->key, pParam, pMergeSource)) {
2548 			t = zend_hash_update(target, p->key, &p->val);
2549 			if (pCopyConstructor) {
2550 				pCopyConstructor(t);
2551 			}
2552 		}
2553 	}
2554 }
2555 
2556 
2557 /* Returns the hash table data if found and NULL if not. */
2558 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2559 {
2560 	Bucket *p;
2561 
2562 	IS_CONSISTENT(ht);
2563 
2564 	(void)zend_string_hash_val(key);
2565 	p = zend_hash_find_bucket(ht, key);
2566 	return p ? &p->val : NULL;
2567 }
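/* Lookup sketch (the key name is illustrative, ht is assumed valid): the find
 * family returns a pointer into the table, or NULL when the key is absent;
 * the caller must not free the returned zval.
 *
 *     zend_string *key = zend_string_init("answer", sizeof("answer") - 1, 0);
 *     zval *zv = zend_hash_find(ht, key);
 *     if (zv && Z_TYPE_P(zv) == IS_LONG) {
 *         zend_long n = Z_LVAL_P(zv);
 *         (void)n;
 *     }
 *     zend_string_release(key);
 */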
2568 
2569 ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, const zend_string *key)
2570 {
2571 	Bucket *p;
2572 
2573 	IS_CONSISTENT(ht);
2574 
2575 	p = zend_hash_find_bucket(ht, key);
2576 	return p ? &p->val : NULL;
2577 }
2578 
2579 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2580 {
2581 	zend_ulong h;
2582 	Bucket *p;
2583 
2584 	IS_CONSISTENT(ht);
2585 
2586 	h = zend_inline_hash_func(str, len);
2587 	p = zend_hash_str_find_bucket(ht, str, len, h);
2588 	return p ? &p->val : NULL;
2589 }
2590 
2591 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2592 {
2593 	Bucket *p;
2594 
2595 	IS_CONSISTENT(ht);
2596 
2597 	if (HT_IS_PACKED(ht)) {
2598 		if (h < ht->nNumUsed) {
2599 			zval *zv = ht->arPacked + h;
2600 
2601 			if (Z_TYPE_P(zv) != IS_UNDEF) {
2602 				return zv;
2603 			}
2604 		}
2605 		return NULL;
2606 	}
2607 
2608 	p = zend_hash_index_find_bucket(ht, h);
2609 	return p ? &p->val : NULL;
2610 }
2611 
2612 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2613 {
2614 	Bucket *p;
2615 
2616 	IS_CONSISTENT(ht);
2617 	ZEND_ASSERT(!HT_IS_PACKED(ht));
2618 
2619 	p = zend_hash_index_find_bucket(ht, h);
2620 	return p ? &p->val : NULL;
2621 }
2622 
2623 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2624 {
2625 	IS_CONSISTENT(ht);
2626 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2627 	*pos = _zend_hash_get_valid_pos(ht, 0);
2628 }
2629 
2630 
2631 /* This function could be optimized considerably by remembering
2632  * the end of the list.
2633  */
2634 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2635 {
2636 	uint32_t idx;
2637 
2638 	IS_CONSISTENT(ht);
2639 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2640 
2641 	idx = ht->nNumUsed;
2642 	if (HT_IS_PACKED(ht)) {
2643 		while (idx > 0) {
2644 			idx--;
2645 			if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2646 				*pos = idx;
2647 				return;
2648 			}
2649 		}
2650 	} else {
2651 		while (idx > 0) {
2652 			idx--;
2653 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2654 				*pos = idx;
2655 				return;
2656 			}
2657 		}
2658 	}
2659 	*pos = ht->nNumUsed;
2660 }
2661 
2662 
2663 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2664 {
2665 	uint32_t idx;
2666 
2667 	IS_CONSISTENT(ht);
2668 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2669 
2670 	idx = _zend_hash_get_valid_pos(ht, *pos);
2671 	if (idx < ht->nNumUsed) {
2672 		if (HT_IS_PACKED(ht)) {
2673 			while (1) {
2674 				idx++;
2675 				if (idx >= ht->nNumUsed) {
2676 					*pos = ht->nNumUsed;
2677 					return SUCCESS;
2678 				}
2679 				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2680 					*pos = idx;
2681 					return SUCCESS;
2682 				}
2683 			}
2684 		} else {
2685 			while (1) {
2686 				idx++;
2687 				if (idx >= ht->nNumUsed) {
2688 					*pos = ht->nNumUsed;
2689 					return SUCCESS;
2690 				}
2691 				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2692 					*pos = idx;
2693 					return SUCCESS;
2694 				}
2695 			}
2696 		}
2697 	} else {
2698 		return FAILURE;
2699 	}
2700 }
2701 
2702 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2703 {
2704 	uint32_t idx = *pos;
2705 
2706 	IS_CONSISTENT(ht);
2707 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2708 
2709 	if (idx < ht->nNumUsed) {
2710 		if (HT_IS_PACKED(ht)) {
2711 			while (idx > 0) {
2712 				idx--;
2713 				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2714 					*pos = idx;
2715 					return SUCCESS;
2716 				}
2717 			}
2718 		} else {
2719 			while (idx > 0) {
2720 				idx--;
2721 				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2722 					*pos = idx;
2723 					return SUCCESS;
2724 				}
2725 			}
2726 		}
2727 		*pos = ht->nNumUsed;
2728 		return SUCCESS;
2729 	} else {
2730 		return FAILURE;
2731 	}
2732 }
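/* Iteration sketch using an external position (variable names are
 * illustrative): reset the position, read the current element, advance.
 *
 *     HashPosition pos;
 *     zval *zv;
 *
 *     zend_hash_internal_pointer_reset_ex(ht, &pos);
 *     while ((zv = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
 *         // ... use zv ...
 *         zend_hash_move_forward_ex(ht, &pos);
 *     }
 */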
2733 
2734 
2735 /* This function should be made binary safe */
2736 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, const HashPosition *pos)
2737 {
2738 	uint32_t idx;
2739 	Bucket *p;
2740 
2741 	IS_CONSISTENT(ht);
2742 	idx = _zend_hash_get_valid_pos(ht, *pos);
2743 	if (idx < ht->nNumUsed) {
2744 		if (HT_IS_PACKED(ht)) {
2745 			*num_index = idx;
2746 			return HASH_KEY_IS_LONG;
2747 		}
2748 		p = ht->arData + idx;
2749 		if (p->key) {
2750 			*str_index = p->key;
2751 			return HASH_KEY_IS_STRING;
2752 		} else {
2753 			*num_index = p->h;
2754 			return HASH_KEY_IS_LONG;
2755 		}
2756 	}
2757 	return HASH_KEY_NON_EXISTENT;
2758 }
2759 
2760 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, const HashPosition *pos)
2761 {
2762 	uint32_t idx;
2763 	Bucket *p;
2764 
2765 	IS_CONSISTENT(ht);
2766 	idx = _zend_hash_get_valid_pos(ht, *pos);
2767 	if (idx >= ht->nNumUsed) {
2768 		ZVAL_NULL(key);
2769 	} else {
2770 		if (HT_IS_PACKED(ht)) {
2771 			ZVAL_LONG(key, idx);
2772 			return;
2773 		}
2774 		p = ht->arData + idx;
2775 		if (p->key) {
2776 			ZVAL_STR_COPY(key, p->key);
2777 		} else {
2778 			ZVAL_LONG(key, p->h);
2779 		}
2780 	}
2781 }
2782 
2783 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2784 {
2785 	uint32_t idx;
2786 	Bucket *p;
2787 
2788 	IS_CONSISTENT(ht);
2789 	idx = _zend_hash_get_valid_pos(ht, *pos);
2790 	if (idx < ht->nNumUsed) {
2791 		if (HT_IS_PACKED(ht)) {
2792 			return HASH_KEY_IS_LONG;
2793 		}
2794 		p = ht->arData + idx;
2795 		if (p->key) {
2796 			return HASH_KEY_IS_STRING;
2797 		} else {
2798 			return HASH_KEY_IS_LONG;
2799 		}
2800 	}
2801 	return HASH_KEY_NON_EXISTENT;
2802 }
2803 
2804 
2805 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2806 {
2807 	uint32_t idx;
2808 	Bucket *p;
2809 
2810 	IS_CONSISTENT(ht);
2811 	idx = _zend_hash_get_valid_pos(ht, *pos);
2812 	if (idx < ht->nNumUsed) {
2813 		if (HT_IS_PACKED(ht)) {
2814 			return &ht->arPacked[idx];
2815 		}
2816 		p = ht->arData + idx;
2817 		return &p->val;
2818 	} else {
2819 		return NULL;
2820 	}
2821 }
2822 
2823 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2824 {
2825 	zval val;
2826 	zend_ulong h;
2827 	zend_string *key;
2828 
2829 	val = p->val;
2830 	h = p->h;
2831 	key = p->key;
2832 
2833 	p->val = q->val;
2834 	p->h = q->h;
2835 	p->key = q->key;
2836 
2837 	q->val = val;
2838 	q->h = h;
2839 	q->key = key;
2840 }
2841 
2842 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2843 {
2844 	zval val;
2845 
2846 	val = p->val;
2847 	p->val = q->val;
2848 	q->val = val;
2849 }
2850 
2851 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2852 {
2853 	zval val;
2854 	zend_ulong h;
2855 
2856 	val = p->val;
2857 	h = p->h;
2858 
2859 	p->val = q->val;
2860 	p->h = q->h;
2861 
2862 	q->val = val;
2863 	q->h = h;
2864 }
2865 
2866 static void zend_hash_sort_internal(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
2867 {
2868 	Bucket *p;
2869 	uint32_t i, j;
2870 
2871 	IS_CONSISTENT(ht);
2872 
2873 	if (!(ht->nNumOfElements > 1) && !(renumber && ht->nNumOfElements > 0)) {
2874 		/* Doesn't require sorting */
2875 		return;
2876 	}
2877 
2878 	if (HT_IS_PACKED(ht)) {
2879 		zend_hash_packed_to_hash(ht); // TODO: ???
2880 	}
2881 
2882 	if (HT_IS_WITHOUT_HOLES(ht)) {
2883 		/* Store original order of elements in extra space to allow stable sorting. */
2884 		for (i = 0; i < ht->nNumUsed; i++) {
2885 			Z_EXTRA(ht->arData[i].val) = i;
2886 		}
2887 	} else {
2888 		/* Remove holes and store original order. */
2889 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2890 			p = ht->arData + j;
2891 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2892 			if (i != j) {
2893 				ht->arData[i] = *p;
2894 			}
2895 			Z_EXTRA(ht->arData[i].val) = i;
2896 			i++;
2897 		}
2898 		ht->nNumUsed = i;
2899 	}
2900 
2901 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
2902 		/* We broke the hash collision chains by overwriting Z_NEXT() with Z_EXTRA().
2903 		 * Reset the hash headers table as well to avoid possible inconsistent
2904 		 * access on recursive data structures.
2905 		 *
2906 		 * See Zend/tests/bug63882_2.phpt
2907 		 */
2908 		HT_HASH_RESET(ht);
2909 	}
2910 
2911 	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
2912 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2913 				(HT_IS_PACKED(ht) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2914 
2915 	ht->nInternalPointer = 0;
2916 
2917 	if (renumber) {
2918 		for (j = 0; j < i; j++) {
2919 			p = ht->arData + j;
2920 			p->h = j;
2921 			if (p->key) {
2922 				zend_string_release(p->key);
2923 				p->key = NULL;
2924 			}
2925 		}
2926 
2927 		ht->nNextFreeElement = i;
2928 	}
2929 	if (HT_IS_PACKED(ht)) {
2930 		if (!renumber) {
2931 			zend_hash_packed_to_hash(ht);
2932 		}
2933 	} else {
2934 		if (renumber) {
2935 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2936 			Bucket *old_buckets = ht->arData;
2937 			zval *zv;
2938 
2939 			new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
2940 			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2941 			ht->nTableMask = HT_MIN_MASK;
2942 			HT_SET_DATA_ADDR(ht, new_data);
2943 			p = old_buckets;
2944 			zv = ht->arPacked;
2945 			for (i = 0; i < ht->nTableSize; i++) {
2946 				ZVAL_COPY_VALUE(zv, &p->val);
2947 				zv++;
2948 				p++;
2949 			}
2950 			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
2951 			HT_HASH_RESET_PACKED(ht);
2952 		} else {
2953 			zend_hash_rehash(ht);
2954 		}
2955 	}
2956 }
2957 
2958 ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
2959 {
2960 	HT_ASSERT_RC1(ht);
2961 	zend_hash_sort_internal(ht, sort, compar, renumber);
2962 }
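/* Call sketch (the comparator and the table contents are assumptions of the
 * example): as noted above, each element's original position is saved in
 * Z_EXTRA before sorting, so stable ordering can be recovered even though the
 * underlying sort routine itself is not stable.
 *
 *     static int cmp_long(Bucket *a, Bucket *b)
 *     {
 *         if (Z_LVAL(a->val) != Z_LVAL(b->val)) {
 *             return Z_LVAL(a->val) < Z_LVAL(b->val) ? -1 : 1;
 *         }
 *         return 0;
 *     }
 *
 *     // zend_hash_sort_ex(ht, zend_sort, cmp_long, 1);  // renumber keys 0..n-1
 */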
2963 
2964 void ZEND_FASTCALL zend_array_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
2965 {
2966 	HT_ASSERT_RC1(ht);
2967 
2968 	/* Unpack the array early to avoid RCn assertion failures. */
2969 	if (HT_IS_PACKED(ht)) {
2970 		zend_hash_packed_to_hash(ht);
2971 	}
2972 
2973 	/* Adding a refcount prevents the array from going away. */
2974 	GC_ADDREF(ht);
2975 
2976 	zend_hash_sort_internal(ht, sort, compar, renumber);
2977 
2978 	if (UNEXPECTED(GC_DELREF(ht) == 0)) {
2979 		zend_array_destroy(ht);
2980 	} else {
2981 		gc_check_possible_root((zend_refcounted *)ht);
2982 	}
2983 }
2984 
2985 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) {
2986 	uint32_t idx1, idx2;
2987 	zend_string *key1, *key2;
2988 	zend_ulong h1, h2;
2989 	zval *pData1, *pData2;
2990 	int result;
2991 
2992 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2993 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2994 	}
2995 
2996 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2997 		if (HT_IS_PACKED(ht1)) {
2998 			pData1 = ht1->arPacked + idx1;
2999 			h1 = idx1;
3000 			key1 = NULL;
3001 		} else {
3002 			Bucket *p = ht1->arData + idx1;
3003 			pData1 = &p->val;
3004 			h1 = p->h;
3005 			key1 = p->key;
3006 		}
3007 
3008 		if (Z_TYPE_P(pData1) == IS_UNDEF) continue;
3009 		if (ordered) {
3010 			if (HT_IS_PACKED(ht2)) {
3011 				while (1) {
3012 					ZEND_ASSERT(idx2 != ht2->nNumUsed);
3013 					pData2 = ht2->arPacked + idx2;
3014 					h2 = idx2;
3015 					key2 = NULL;
3016 					if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3017 					idx2++;
3018 				}
3019 			} else {
3020 				while (1) {
3021 					Bucket *p;
3022 					ZEND_ASSERT(idx2 != ht2->nNumUsed);
3023 					p = ht2->arData + idx2;
3024 					pData2 = &p->val;
3025 					h2 = p->h;
3026 					key2 = p->key;
3027 					if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3028 					idx2++;
3029 				}
3030 			}
3031 			if (key1 == NULL && key2 == NULL) { /* numeric indices */
3032 				if (h1 != h2) {
3033 					return h1 > h2 ? 1 : -1;
3034 				}
3035 			} else if (key1 != NULL && key2 != NULL) { /* string indices */
3036 				if (ZSTR_LEN(key1) != ZSTR_LEN(key2)) {
3037 					return ZSTR_LEN(key1) > ZSTR_LEN(key2) ? 1 : -1;
3038 				}
3039 
3040 				result = memcmp(ZSTR_VAL(key1), ZSTR_VAL(key2), ZSTR_LEN(key1));
3041 				if (result != 0) {
3042 					return result;
3043 				}
3044 			} else {
3045 				/* Mixed key types: A string key is considered as larger */
3046 				return key1 != NULL ? 1 : -1;
3047 			}
3048 			idx2++;
3049 		} else {
3050 			if (key1 == NULL) { /* numeric index */
3051 				pData2 = zend_hash_index_find(ht2, h1);
3052 				if (pData2 == NULL) {
3053 					return 1;
3054 				}
3055 			} else { /* string index */
3056 				pData2 = zend_hash_find(ht2, key1);
3057 				if (pData2 == NULL) {
3058 					return 1;
3059 				}
3060 			}
3061 		}
3062 
3063 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
3064 			pData1 = Z_INDIRECT_P(pData1);
3065 		}
3066 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
3067 			pData2 = Z_INDIRECT_P(pData2);
3068 		}
3069 
3070 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
3071 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
3072 				return -1;
3073 			}
3074 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
3075 			return 1;
3076 		} else {
3077 			result = compar(pData1, pData2);
3078 			if (result != 0) {
3079 				return result;
3080 			}
3081 		}
3082 	}
3083 
3084 	return 0;
3085 }
3086 
3087 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
3088 {
3089 	int result;
3090 	IS_CONSISTENT(ht1);
3091 	IS_CONSISTENT(ht2);
3092 
3093 	if (ht1 == ht2) {
3094 		return 0;
3095 	}
3096 
3097 	/* It's enough to protect only one of the arrays.
3098 	 * The second one may be referenced from the first and this may cause
3099 	 * false recursion detection.
3100 	 */
3101 	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
3102 		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
3103 	}
3104 
3105 	GC_TRY_PROTECT_RECURSION(ht1);
3106 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
3107 	GC_TRY_UNPROTECT_RECURSION(ht1);
3108 
3109 	return result;
3110 }
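/* Usage sketch (the comparator wrapper is an assumption of the example):
 * ordered=true requires the key sequence as well as the values to match
 * pairwise (roughly how === treats arrays), while ordered=false only requires
 * each key of ht1 to exist in ht2 with a matching value (roughly how == does).
 *
 *     static int zval_compare_wrapper(const void *a, const void *b)
 *     {
 *         return zend_compare((zval *) a, (zval *) b);
 *     }
 *
 *     // int r = zend_hash_compare(ht1, ht2, zval_compare_wrapper, 0);
 *     // r == 0 means equal, < 0 / > 0 give an ordering
 */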
3111 
3112 
3113 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
3114 {
3115 	uint32_t idx;
3116 	zval *res;
3117 
3118 	IS_CONSISTENT(ht);
3119 
3120 	if (ht->nNumOfElements == 0) {
3121 		return NULL;
3122 	}
3123 
3124 	if (HT_IS_PACKED(ht)) {
3125 		zval *zv;
3126 
3127 		idx = 0;
3128 		while (1) {
3129 			if (idx == ht->nNumUsed) {
3130 				return NULL;
3131 			}
3132 			if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) break;
3133 			idx++;
3134 		}
3135 		res = ht->arPacked + idx;
3136 		for (; idx < ht->nNumUsed; idx++) {
3137 			zv = ht->arPacked + idx;
3138 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
3139 
3140 			if (flag) {
3141 				if (compar(res, zv) < 0) { /* max */
3142 					res = zv;
3143 				}
3144 			} else {
3145 				if (compar(res, zv) > 0) { /* min */
3146 					res = zv;
3147 				}
3148 			}
3149 		}
3150 	} else {
3151 		Bucket *p;
3152 
3153 		idx = 0;
3154 		while (1) {
3155 			if (idx == ht->nNumUsed) {
3156 				return NULL;
3157 			}
3158 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
3159 			idx++;
3160 		}
3161 		res = &ht->arData[idx].val;
3162 		for (; idx < ht->nNumUsed; idx++) {
3163 			p = ht->arData + idx;
3164 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
3165 
3166 			if (flag) {
3167 				if (compar(res, &p->val) < 0) { /* max */
3168 					res = &p->val;
3169 				}
3170 			} else {
3171 				if (compar(res, &p->val) > 0) { /* min */
3172 					res = &p->val;
3173 				}
3174 			}
3175 		}
3176 	}
3177 	return res;
3178 }
3179 
3180 ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
3181 {
3182 	const char *tmp = key;
3183 
3184 	const char *end = key + length;
3185 
3186 	if (*tmp == '-') {
3187 		tmp++;
3188 	}
3189 
3190 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
3191 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
3192 	 || (SIZEOF_ZEND_LONG == 4 &&
3193 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
3194 	     *tmp > '2')) { /* overflow */
3195 		return 0;
3196 	}
3197 	*idx = (*tmp - '0');
3198 	while (1) {
3199 		++tmp;
3200 		if (tmp == end) {
3201 			if (*key == '-') {
3202 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
3203 					return 0;
3204 				}
3205 				*idx = 0 - *idx;
3206 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
3207 				return 0;
3208 			}
3209 			return 1;
3210 		}
3211 		if (*tmp <= '9' && *tmp >= '0') {
3212 			*idx = (*idx * 10) + (*tmp - '0');
3213 		} else {
3214 			return 0;
3215 		}
3216 	}
3217 }
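/* Behaviour sketch (inputs are illustrative): only strings that are canonical
 * decimal integers within zend_long range are accepted, so they can be folded
 * into numeric hash keys the way PHP array keys are.
 *
 *     zend_ulong idx;
 *     _zend_handle_numeric_str_ex("123", 3, &idx);   // 1, idx == 123
 *     _zend_handle_numeric_str_ex("-7", 2, &idx);    // 1, idx == (zend_ulong) -7
 *     _zend_handle_numeric_str_ex("0123", 4, &idx);  // 0, leading zero rejected
 *     _zend_handle_numeric_str_ex("12a", 3, &idx);   // 0, not a plain integer
 */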
3218 
3219 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
3220  * and converts it to a "proptable" (contains only string keys).
3221  * If the symtable didn't need duplicating, its refcount is incremented.
3222  */
3223 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
3224 {
3225 	zend_ulong num_key;
3226 	zend_string *str_key;
3227 	zval *zv;
3228 
3229 	if (UNEXPECTED(HT_IS_PACKED(ht))) {
3230 		goto convert;
3231 	}
3232 
3233 	ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3234 		if (!str_key) {
3235 			goto convert;
3236 		}
3237 	} ZEND_HASH_FOREACH_END();
3238 
3239 	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
3240 		GC_ADDREF(ht);
3241 	}
3242 
3243 	return ht;
3244 
3245 convert:
3246 	{
3247 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3248 
3249 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
3250 			if (!str_key) {
3251 				str_key = zend_long_to_str(num_key);
3252 				zend_string_delref(str_key);
3253 			}
3254 			do {
3255 				if (Z_OPT_REFCOUNTED_P(zv)) {
3256 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3257 						zv = Z_REFVAL_P(zv);
3258 						if (!Z_OPT_REFCOUNTED_P(zv)) {
3259 							break;
3260 						}
3261 					}
3262 					Z_ADDREF_P(zv);
3263 				}
3264 			} while (0);
3265 			zend_hash_update(new_ht, str_key, zv);
3266 		} ZEND_HASH_FOREACH_END();
3267 
3268 		return new_ht;
3269 	}
3270 }
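/* Effect sketch (keys are illustrative): when conversion is needed, an integer
 * key such as 42 is re-added under the string "42", so the result contains
 * only string keys; otherwise the original table is returned with an extra
 * reference.
 *
 *     // symtable: [42 => $a, "name" => $b]  ->  proptable: ["42" => $a, "name" => $b]
 *     HashTable *props = zend_symtable_to_proptable(ht);
 */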
3271 
3272 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
3273  * a "symtable" (contains integer and non-numeric string keys).
3274  * If the proptable didn't need duplicating, its refcount is incremented.
3275  */
3276 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
3277 {
3278 	zend_ulong num_key;
3279 	zend_string *str_key;
3280 	zval *zv;
3281 
3282 	if (!HT_IS_PACKED(ht)) {
3283 		ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3284 			/* The `str_key &&` here might seem redundant: property tables should
3285 			 * only have string keys. Unfortunately, this isn't true, at the very
3286 			 * least because of ArrayObject, which stores a symtable where the
3287 			 * property table should be.
3288 			 */
3289 			if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3290 				goto convert;
3291 			}
3292 		} ZEND_HASH_FOREACH_END();
3293 	}
3294 
3295 	if (always_duplicate) {
3296 		return zend_array_dup(ht);
3297 	}
3298 
3299 	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
3300 		GC_ADDREF(ht);
3301 	}
3302 
3303 	return ht;
3304 
3305 convert:
3306 	{
3307 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3308 
3309 		ZEND_HASH_MAP_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
3310 			do {
3311 				if (Z_OPT_REFCOUNTED_P(zv)) {
3312 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3313 						zv = Z_REFVAL_P(zv);
3314 						if (!Z_OPT_REFCOUNTED_P(zv)) {
3315 							break;
3316 						}
3317 					}
3318 					Z_ADDREF_P(zv);
3319 				}
3320 			} while (0);
3321 			/* Again, thank ArrayObject for `!str_key ||`. */
3322 			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3323 				zend_hash_index_update(new_ht, num_key, zv);
3324 			} else {
3325 				zend_hash_update(new_ht, str_key, zv);
3326 			}
3327 		} ZEND_HASH_FOREACH_END();
3328 
3329 		return new_ht;
3330 	}
3331 }
3332