xref: /PHP-8.3/Zend/zend_hash.c (revision b175ea42)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #if defined(__aarch64__) || defined(_M_ARM64)
26 # include <arm_neon.h>
27 #endif
28 
29 /* Prefer to use AVX2 instructions for better latency and throughput */
30 #if defined(__AVX2__)
31 # include <immintrin.h>
32 #elif defined( __SSE2__)
33 # include <mmintrin.h>
34 # include <emmintrin.h>
35 #endif
36 
#if ZEND_DEBUG
/* Assert that may be waived per-table: a HashTable flagged with
 * HASH_FLAG_ALLOW_COW_VIOLATION is exempt from the check. */
# define HT_ASSERT(ht, expr) \
	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
#else
# define HT_ASSERT(ht, expr)
#endif

/* Writers must hold the only reference (no copy-on-write sharing). */
#define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)

/* Sentinel stored in iterators whose backing table has been destroyed. */
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
47 
#if ZEND_DEBUG

/* Consistency states kept in the HASH_FLAG_CONSISTENCY bits (debug builds). */
#define HT_OK					0x00
#define HT_IS_DESTROYING		0x01
#define HT_DESTROYED			0x02
#define HT_CLEANING				0x03

/* Abort with a diagnostic when ht is not in the HT_OK state. Invoked via
 * IS_CONSISTENT() so the whole check disappears in release builds. */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
		return;
	}
	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
		case HT_IS_DESTROYING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
			break;
		case HT_DESTROYED:
			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
			break;
		case HT_CLEANING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
			break;
		default:
			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
			break;
	}
	ZEND_UNREACHABLE();
}
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
/* Record a new consistency state; `ht` must be in scope at the use site. */
#define SET_INCONSISTENT(n) do { \
		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
	} while (0)
#else
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif

/* Grow the bucket/hash storage when every slot is in use. */
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
		zend_hash_do_resize(ht);					\
	}
89 
zend_hash_str_find_ptr_lc(const HashTable * ht,const char * str,size_t len)90 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
91 	void *result;
92 	char *lc_str;
93 
94 	/* Stack allocate small strings to improve performance */
95 	ALLOCA_FLAG(use_heap)
96 
97 	lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
98 	result = zend_hash_str_find_ptr(ht, lc_str, len);
99 	free_alloca(lc_str, use_heap);
100 
101 	return result;
102 }
103 
zend_hash_find_ptr_lc(const HashTable * ht,zend_string * key)104 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
105 	void *result;
106 	zend_string *lc_key = zend_string_tolower(key);
107 	result = zend_hash_find_ptr(ht, lc_key);
108 	zend_string_release(lc_key);
109 	return result;
110 }
111 
112 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
113 
/* Clamp nSize into [HT_MIN_SIZE, HT_MAX_SIZE] and round it up to the next
 * power of two. Raises a fatal error when nSize exceeds HT_MAX_SIZE. */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#ifdef ZEND_WIN32
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize > HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#ifdef ZEND_WIN32
	if (BitScanReverse(&index, nSize - 1)) {
		return 0x2u << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bit scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: round up to the next power of two by smearing the
	 * highest set bit of (nSize - 1) into every lower position. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
148 
/* Allocate storage for a packed (vector-like) table of ht->nTableSize slots
 * and mark the table packed with static keys. */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
	void *data;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
		/* Use specialized API with constant allocation amount for a particularly common case. */
		data = emalloc(HT_PACKED_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
	} else {
		data = emalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
	}
	HT_SET_DATA_ADDR(ht, data);
	/* Don't overwrite iterator count. */
	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET_PACKED(ht);
}
166 
zend_hash_real_init_mixed_ex(HashTable * ht)167 static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
168 {
169 	void *data;
170 	uint32_t nSize = ht->nTableSize;
171 
172 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
173 
174 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
175 		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
176 	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
177 		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
178 		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
179 		HT_SET_DATA_ADDR(ht, data);
180 		/* Don't overwrite iterator count. */
181 		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
182 #if defined(__AVX2__)
183 		do {
184 			__m256i ymm0 = _mm256_setzero_si256();
185 			ymm0 = _mm256_cmpeq_epi64(ymm0, ymm0);
186 			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data,  0), ymm0);
187 			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data,  8), ymm0);
188 		} while(0);
189 #elif defined (__SSE2__)
190 		do {
191 			__m128i xmm0 = _mm_setzero_si128();
192 			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
193 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  0), xmm0);
194 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  4), xmm0);
195 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  8), xmm0);
196 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
197 		} while (0);
198 #elif defined(__aarch64__) || defined(_M_ARM64)
199 		do {
200 			int32x4_t t = vdupq_n_s32(-1);
201 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  0), t);
202 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  4), t);
203 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  8), t);
204 			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
205 		} while (0);
206 #else
207 		HT_HASH_EX(data,  0) = -1;
208 		HT_HASH_EX(data,  1) = -1;
209 		HT_HASH_EX(data,  2) = -1;
210 		HT_HASH_EX(data,  3) = -1;
211 		HT_HASH_EX(data,  4) = -1;
212 		HT_HASH_EX(data,  5) = -1;
213 		HT_HASH_EX(data,  6) = -1;
214 		HT_HASH_EX(data,  7) = -1;
215 		HT_HASH_EX(data,  8) = -1;
216 		HT_HASH_EX(data,  9) = -1;
217 		HT_HASH_EX(data, 10) = -1;
218 		HT_HASH_EX(data, 11) = -1;
219 		HT_HASH_EX(data, 12) = -1;
220 		HT_HASH_EX(data, 13) = -1;
221 		HT_HASH_EX(data, 14) = -1;
222 		HT_HASH_EX(data, 15) = -1;
223 #endif
224 		return;
225 	} else {
226 		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
227 	}
228 	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
229 	HT_SET_DATA_ADDR(ht, data);
230 	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
231 	HT_HASH_RESET(ht);
232 }
233 
zend_hash_real_init_ex(HashTable * ht,bool packed)234 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
235 {
236 	HT_ASSERT_RC1(ht);
237 	ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
238 	if (packed) {
239 		zend_hash_real_init_packed_ex(ht);
240 	} else {
241 		zend_hash_real_init_mixed_ex(ht);
242 	}
243 }
244 
/* Hash slots shared by every uninitialized table: both slots hold
 * HT_INVALID_IDX so lookups miss without touching real storage. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};

/* The shared immutable empty array (refcount 2, GC_IMMUTABLE: never freed,
 * never modified; writers must separate first). */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_UNINITIALIZED,
	.nTableMask = HT_MIN_MASK,
	/* arData points just past the two invalid hash slots above. */
	{.arData = (Bucket*)&uninitialized_bucket[2]},
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = ZEND_LONG_MIN,
	.pDestructor = ZVAL_PTR_DTOR
};
261 
/* Common initializer: set up an empty table whose real storage is attached
 * lazily on first insert via zend_hash_real_init_*(). */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
	ht->nTableMask = HT_MIN_MASK;
	/* Point at the shared dummy slots until storage is really allocated. */
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->pDestructor = pDestructor;
	/* Remember the rounded size now; allocation happens later. */
	ht->nTableSize = zend_hash_check_size(nSize);
}
276 
/* Public entry point for initializing a caller-provided HashTable. */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
281 
_zend_new_array_0(void)282 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
283 {
284 	HashTable *ht = emalloc(sizeof(HashTable));
285 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
286 	return ht;
287 }
288 
_zend_new_array(uint32_t nSize)289 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
290 {
291 	HashTable *ht = emalloc(sizeof(HashTable));
292 	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
293 	return ht;
294 }
295 
zend_new_pair(zval * val1,zval * val2)296 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
297 {
298 	zval *zv;
299 	HashTable *ht = emalloc(sizeof(HashTable));
300 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
301 	ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
302 	zend_hash_real_init_packed_ex(ht);
303 
304 	zv = ht->arPacked;
305 	ZVAL_COPY_VALUE(zv, val1);
306 	zv++;
307 	ZVAL_COPY_VALUE(zv, val2);
308 	return ht;
309 }
310 
zend_hash_packed_grow(HashTable * ht)311 ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
312 {
313 	HT_ASSERT_RC1(ht);
314 	if (ht->nTableSize >= HT_MAX_SIZE) {
315 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
316 	}
317 	uint32_t newTableSize = ht->nTableSize * 2;
318 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
319 	ht->nTableSize = newTableSize;
320 }
321 
/* Public wrapper: allocate storage for an uninitialized table in the
 * requested (packed or mixed) layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
329 
/* Public wrapper: allocate packed-layout storage for an uninitialized table. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
337 
/* Public wrapper: allocate mixed-layout storage for an uninitialized table. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
345 
/* Convert a packed table to the mixed (hashed) layout: each packed zval
 * becomes a keyless Bucket whose integer key is its old position, then the
 * hash part is rebuilt with zend_hash_rehash(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	zval *src = ht->arPacked;
	Bucket *dst;
	uint32_t i;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	HT_ASSERT_RC1(ht);
	// Alloc before assign to avoid inconsistencies on OOM
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	dst = ht->arData;
	for (i = 0; i < ht->nNumUsed; i++) {
		ZVAL_COPY_VALUE(&dst->val, src);
		dst->h = i;		/* integer key == old packed position */
		dst->key = NULL;	/* NULL key marks an integer-keyed bucket */
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	zend_hash_rehash(ht);
}
373 
/* Convert a mixed table to the packed layout by copying bucket values in
 * order. Caller must ensure the table is packable (dense integer keys);
 * string keys and hash positions are dropped. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *src = ht->arData;
	zval *dst;
	uint32_t i;

	HT_ASSERT_RC1(ht);
	/* Alloc before mutating the table to stay consistent on OOM. */
	new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, new_data);
	HT_HASH_RESET_PACKED(ht);
	dst = ht->arPacked;
	for (i = 0; i < ht->nNumUsed; i++) {
		ZVAL_COPY_VALUE(dst, &src->val);
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
395 
/* Pre-size ht so that at least nSize elements fit without reallocation.
 * Never shrinks. `packed` selects the layout for an uninitialized table
 * and must match the current layout of an initialized one. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
{
	HT_ASSERT_RC1(ht);

	if (nSize == 0) return;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			ZEND_ASSERT(HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Packed storage grows in place via realloc. */
				uint32_t newTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
				ht->nTableSize = newTableSize;
			}
		} else {
			ZEND_ASSERT(!HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Mixed layout: fresh allocation, bucket copy, and a
				 * rehash against the larger mask. */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableSize = nSize;
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
434 
/* Roll the table back so only the first nNumUsed buckets remain, unlinking
 * each discarded bucket from its collision chain. Values are NOT destroyed
 * here; the caller is responsible for that. Mixed-layout tables only. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	/* Walk backwards: because chains point from higher to lower buckets,
	 * each bucket visited is the current head of its chain, so popping it
	 * is a single hash-slot store. */
	while (p != end) {
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
#if 0
		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
			abort();
		}
#endif
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
459 
/* Count live elements, treating IS_INDIRECT slots whose target is UNDEF as
 * absent (e.g. unset entries in a symbol table). */
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
	zval *val;
	uint32_t num = ht->nNumOfElements;

	ZEND_HASH_MAP_FOREACH_VAL(ht, val) {
		if (Z_TYPE_P(val) == IS_INDIRECT) {
			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
				num--;
			}
		}
	} ZEND_HASH_FOREACH_END();
	return num;
}
/* }}} */
475 
zend_array_count(HashTable * ht)476 ZEND_API uint32_t zend_array_count(HashTable *ht)
477 {
478 	uint32_t num;
479 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
480 		num = zend_array_recalc_elements(ht);
481 		if (UNEXPECTED(ht->nNumOfElements == num)) {
482 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
483 		}
484 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
485 		num = zend_array_recalc_elements(ht);
486 	} else {
487 		num = zend_hash_num_elements(ht);
488 	}
489 	return num;
490 }
491 /* }}} */
492 
_zend_hash_get_valid_pos(const HashTable * ht,HashPosition pos)493 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
494 {
495 	if (HT_IS_PACKED(ht)) {
496 		while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arPacked[pos])) {
497 			pos++;
498 		}
499 	} else {
500 		while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
501 			pos++;
502 		}
503 	}
504 	return pos;
505 }
506 
/* Internal pointer of ht, skipped past any UNDEF slots. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}

/* Exported wrapper around _zend_hash_get_current_pos(). */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}
516 
/* Delete every other iterator in the circular next_copy list of idx,
 * leaving idx itself as a self-referencing singleton. */
static void zend_hash_remove_iterator_copies(uint32_t idx) {
	HashTableIterator *iterators = EG(ht_iterators);

	HashTableIterator *iter = iterators + idx;
	uint32_t next_idx = iter->next_copy;
	while (next_idx != idx) {
		uint32_t cur_idx = next_idx;
		HashTableIterator *cur_iter = iterators + cur_idx;
		next_idx = cur_iter->next_copy;
		cur_iter->next_copy = cur_idx; // avoid recursion in zend_hash_iterator_del
		zend_hash_iterator_del(cur_idx);
	}
	iter->next_copy = idx;
}
531 
/* Register an iterator over ht at position pos and return its slot index.
 * Reuses a free slot when one exists; otherwise grows EG(ht_iterators) by
 * 8 entries, migrating off the fixed EG(ht_iterators_slots) on first growth. */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end  = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* Once the per-table counter saturates it is no longer maintained. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	while (iter != end) {
		if (iter->ht == NULL) {
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			iter->next_copy = idx;	/* singleton copy list */
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* No free slot: extend the iterator array by 8 entries. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		/* Still on the embedded slots; move to heap storage. */
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	/* Zero the remaining 7 new entries so they read as free (ht == NULL). */
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
	idx = iter - EG(ht_iterators);
	iter->next_copy = idx;
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
570 
// To avoid losing track of the HashTable when separating arrays, we track all copies at once.
/* Search iterator idx's circular copy list for an entry bound to ht. On a
 * hit, adopt that copy's table and position into slot idx, fix the per-table
 * iterator counters, collapse the copy list, and return true. */
static zend_always_inline bool zend_hash_iterator_find_copy_pos(uint32_t idx, HashTable *ht) {
	HashTableIterator *iter = EG(ht_iterators) + idx;

	uint32_t next_idx = iter->next_copy;
	if (EXPECTED(next_idx != idx)) {
		HashTableIterator *copy_iter;
		while (next_idx != idx) {
			copy_iter = EG(ht_iterators) + next_idx;
			if (copy_iter->ht == ht) {
				// We have found the hashtable we are actually iterating over
				// Now clean any intermittent copies and replace the original index by the found one
				if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
					&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
					HT_DEC_ITERATORS_COUNT(iter->ht);
				}
				if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
					HT_INC_ITERATORS_COUNT(ht);
				}
				iter->ht = copy_iter->ht;
				iter->pos = copy_iter->pos;
				zend_hash_remove_iterator_copies(idx);
				return true;
			}
			next_idx = copy_iter->next_copy;
		}
		/* ht not among the copies: they are stale, drop them all. */
		zend_hash_remove_iterator_copies(idx);
	}

	return false;
}
602 
/* Current position of iterator idx over ht. If the iterator is attached to
 * a different table (e.g. after array separation), try to re-attach via its
 * copy list; failing that, rebind it to ht at the table's current position. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
		/* Move the iterator count from the old table to ht. */
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
621 
zend_hash_iterator_pos_ex(uint32_t idx,zval * array)622 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
623 {
624 	HashTable *ht = Z_ARRVAL_P(array);
625 	HashTableIterator *iter = EG(ht_iterators) + idx;
626 
627 	ZEND_ASSERT(idx != (uint32_t)-1);
628 	if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
629 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
630 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
631 			HT_DEC_ITERATORS_COUNT(iter->ht);
632 		}
633 		SEPARATE_ARRAY(array);
634 		ht = Z_ARRVAL_P(array);
635 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
636 			HT_INC_ITERATORS_COUNT(ht);
637 		}
638 		iter->ht = ht;
639 		iter->pos = _zend_hash_get_current_pos(ht);
640 	}
641 	return iter->pos;
642 }
643 
/* Release iterator slot idx: drop its table's iterator count, clear the
 * slot, flush any copy list, and trim EG(ht_iterators_used). */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);

	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;	/* NULL marks the slot free for reuse */

	if (UNEXPECTED(iter->next_copy != idx)) {
		zend_hash_remove_iterator_copies(idx);
	}

	/* Shrink the used range past any trailing free slots. */
	if (idx == EG(ht_iterators_used) - 1) {
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
668 
_zend_hash_iterators_remove(HashTable * ht)669 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
670 {
671 	HashTableIterator *iter = EG(ht_iterators);
672 	HashTableIterator *end  = iter + EG(ht_iterators_used);
673 
674 	while (iter != end) {
675 		if (iter->ht == ht) {
676 			iter->ht = HT_POISONED_PTR;
677 		}
678 		iter++;
679 	}
680 }
681 
/* Poison all iterators of ht; cheap flag check first, slow path out of line. */
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		_zend_hash_iterators_remove(ht);
	}
}
688 
zend_hash_iterators_lower_pos(HashTable * ht,HashPosition start)689 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
690 {
691 	HashTableIterator *iter = EG(ht_iterators);
692 	HashTableIterator *end  = iter + EG(ht_iterators_used);
693 	HashPosition res = ht->nNumUsed;
694 
695 	while (iter != end) {
696 		if (iter->ht == ht) {
697 			if (iter->pos >= start && iter->pos < res) {
698 				res = iter->pos;
699 			}
700 		}
701 		iter++;
702 	}
703 	return res;
704 }
705 
_zend_hash_iterators_update(HashTable * ht,HashPosition from,HashPosition to)706 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
707 {
708 	HashTableIterator *iter = EG(ht_iterators);
709 	HashTableIterator *end  = iter + EG(ht_iterators_used);
710 
711 	while (iter != end) {
712 		if (iter->ht == ht && iter->pos == from) {
713 			iter->pos = to;
714 		}
715 		iter++;
716 	}
717 }
718 
zend_hash_iterators_advance(HashTable * ht,HashPosition step)719 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
720 {
721 	HashTableIterator *iter = EG(ht_iterators);
722 	HashTableIterator *end  = iter + EG(ht_iterators_used);
723 
724 	while (iter != end) {
725 		if (iter->ht == ht) {
726 			iter->pos += step;
727 		}
728 		iter++;
729 	}
730 }
731 
/* Hash must be known and precomputed before */
/* Find the bucket for zend_string key, or NULL. Pointer identity is checked
 * first at each step so interned keys skip the content comparison. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, const zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	ZEND_ASSERT(ZSTR_H(key) != 0 && "Hash must be known");

	arData = ht->arData;
	nIndex = ZSTR_H(key) | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);

	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}

	/* Walk the collision chain linked through Z_NEXT. */
	while (1) {
		if (p->h == ZSTR_H(key) &&
		    EXPECTED(p->key) &&
		    zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
769 
/* Find the bucket for the raw key (str, len) with precomputed hash h,
 * or NULL when absent. */
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		/* p->key may be NULL for integer-keyed buckets sharing the slot. */
		if ((p->h == h)
			 && p->key
			 && zend_string_equals_cstr(p->key, str, len)) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
791 
/* Find the bucket for integer key h (integer-keyed buckets have key == NULL),
 * or NULL when absent. */
static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->h == h && !p->key) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
811 
/* Core insert/update for string keys. Behavior selected by flag bits:
 * HASH_ADD (fail when key exists), HASH_UPDATE (overwrite), HASH_ADD_NEW
 * (caller guarantees the key is absent), HASH_UPDATE_INDIRECT (follow
 * IS_INDIRECT slots), HASH_LOOKUP (return existing slot or insert NULL).
 * Returns the stored zval, or NULL when an ADD hits an existing key. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_string_hash_val(key);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* Fresh table: allocate storage and insert without a lookup. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces conversion from packed to mixed layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
		/* Debug builds also look up for ADD_NEW to assert the caller's
		 * claim that the key is absent. */
		p = zend_hash_find_bucket(ht, key);

		if (p) {
			zval *data;

			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				/* ADD+UPDATE_INDIRECT may only fill an UNDEF indirect slot. */
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			/* Destroy the old value before overwriting it in place. */
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	/* Link the new bucket at the head of its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
892 
/* Core insert/update for raw (char*, len) keys with precomputed hash h.
 * Same flag semantics as _zend_hash_add_or_update_i(); on insert a new
 * zend_string key is always allocated, so the table loses STATIC_KEYS. */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* Fresh table: allocate storage and insert without a lookup. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces conversion from packed to mixed layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);

		if (p) {
			zval *data;

			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				/* ADD+UPDATE_INDIRECT may only fill an UNDEF indirect slot. */
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			/* Destroy the old value before overwriting it in place. */
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	/* Materialize the key; persistent tables get a persistent string. */
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
#if ZEND_RC_DEBUG
	if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) {
		GC_MAKE_PERSISTENT_LOCAL(key);
	}
#endif
	p->h = ZSTR_H(key) = h;	/* reuse the caller's precomputed hash */
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	/* Link the new bucket at the head of its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);

	return &p->val;
}
972 
/* Public dispatcher: routes a zend_string-keyed insert/update to the
 * specialized entry point matching `flag`. Only the four flag values
 * listed below are valid (asserted in the final branch). */
zend_hash_add_or_update(HashTable * ht,zend_string * key,zval * pData,uint32_t flag)973 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
974 {
975 	if (flag == HASH_ADD) {
976 		return zend_hash_add(ht, key, pData);
977 	} else if (flag == HASH_ADD_NEW) {
978 		return zend_hash_add_new(ht, key, pData);
979 	} else if (flag == HASH_UPDATE) {
980 		return zend_hash_update(ht, key, pData);
981 	} else {
982 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
983 		return zend_hash_update_ind(ht, key, pData);
984 	}
985 }
986 
/* Insert `pData` under `key`; returns NULL if the key already exists. */
zend_hash_add(HashTable * ht,zend_string * key,zval * pData)987 ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
988 {
989 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
990 }
991 
/* Insert or overwrite the entry under `key`; never fails for a valid table. */
zend_hash_update(HashTable * ht,zend_string * key,zval * pData)992 ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
993 {
994 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
995 }
996 
/* Like zend_hash_update(), but if the existing value is IS_INDIRECT the
 * update is applied to the indirection target (used e.g. for symbol tables). */
zend_hash_update_ind(HashTable * ht,zend_string * key,zval * pData)997 ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
998 {
999 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
1000 }
1001 
/* Fast-path insert: the caller guarantees `key` is not yet in the table,
 * so the duplicate check is skipped. */
zend_hash_add_new(HashTable * ht,zend_string * key,zval * pData)1002 ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
1003 {
1004 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
1005 }
1006 
/* Find the slot for `key`, inserting an IS_NULL placeholder if absent;
 * always returns a valid zval pointer (no pData is stored). */
zend_hash_lookup(HashTable * ht,zend_string * key)1007 ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
1008 {
1009 	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
1010 }
1011 
/* C-string counterpart of zend_hash_add_or_update(): dispatches on the
 * same four valid flag combinations to the zend_hash_str_* entry points. */
zend_hash_str_add_or_update(HashTable * ht,const char * str,size_t len,zval * pData,uint32_t flag)1012 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
1013 {
1014 	if (flag == HASH_ADD) {
1015 		return zend_hash_str_add(ht, str, len, pData);
1016 	} else if (flag == HASH_ADD_NEW) {
1017 		return zend_hash_str_add_new(ht, str, len, pData);
1018 	} else if (flag == HASH_UPDATE) {
1019 		return zend_hash_str_update(ht, str, len, pData);
1020 	} else {
1021 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
1022 		return zend_hash_str_update_ind(ht, str, len, pData);
1023 	}
1024 }
1025 
/* Insert or overwrite under the C string str/len; computes the hash here. */
zend_hash_str_update(HashTable * ht,const char * str,size_t len,zval * pData)1026 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
1027 {
1028 	zend_ulong h = zend_hash_func(str, len);
1029 
1030 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
1031 }
1032 
/* Like zend_hash_str_update(), but follows IS_INDIRECT values before
 * overwriting (symbol-table semantics). */
zend_hash_str_update_ind(HashTable * ht,const char * str,size_t len,zval * pData)1033 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
1034 {
1035 	zend_ulong h = zend_hash_func(str, len);
1036 
1037 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
1038 }
1039 
/* Insert under the C string str/len; returns NULL if the key exists. */
zend_hash_str_add(HashTable * ht,const char * str,size_t len,zval * pData)1040 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
1041 {
1042 	zend_ulong h = zend_hash_func(str, len);
1043 
1044 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
1045 }
1046 
/* Fast-path insert of a C-string key the caller guarantees is absent. */
zend_hash_str_add_new(HashTable * ht,const char * str,size_t len,zval * pData)1047 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
1048 {
1049 	zend_ulong h = zend_hash_func(str, len);
1050 
1051 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
1052 }
1053 
/* Insert an IS_NULL value under integer key h; NULL if the key exists. */
zend_hash_index_add_empty_element(HashTable * ht,zend_ulong h)1054 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
1055 {
1056 	zval dummy;
1057 
1058 	ZVAL_NULL(&dummy);
1059 	return zend_hash_index_add(ht, h, &dummy);
1060 }
1061 
/* Insert an IS_NULL value under `key`; NULL if the key exists. */
zend_hash_add_empty_element(HashTable * ht,zend_string * key)1062 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
1063 {
1064 	zval dummy;
1065 
1066 	ZVAL_NULL(&dummy);
1067 	return zend_hash_add(ht, key, &dummy);
1068 }
1069 
/* Insert an IS_NULL value under the C string str/len; NULL if key exists. */
zend_hash_str_add_empty_element(HashTable * ht,const char * str,size_t len)1070 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
1071 {
1072 	zval dummy;
1073 
1074 	ZVAL_NULL(&dummy);
1075 	return zend_hash_str_add(ht, str, len, &dummy);
1076 }
1077 
/* Common worker for all integer-keyed insert/update/lookup operations.
 * Handles both representations of the table:
 *   - packed (arPacked): values stored directly at index h when h is small
 *     enough; holes are filled with IS_UNDEF; replacing a hole would break
 *     element order, so the table is converted to hashed form instead;
 *   - hashed (arData): regular bucket + collision-chain insertion.
 * Flag bits: HASH_ADD (fail on existing key), HASH_UPDATE (overwrite),
 * HASH_ADD_NEW (skip existence check), HASH_ADD_NEXT (h is
 * ht->nNextFreeElement), HASH_LOOKUP (return slot or insert IS_NULL;
 * pData may be NULL). Returns the stored zval, or NULL on failed HASH_ADD. */
_zend_hash_index_add_or_update_i(HashTable * ht,zend_ulong h,zval * pData,uint32_t flag)1078 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1079 {
1080 	uint32_t nIndex;
1081 	uint32_t idx;
1082 	Bucket *p;
1083 	zval *zv;
1084 
1085 	IS_CONSISTENT(ht);
1086 	HT_ASSERT_RC1(ht);
1087 
	/* nNextFreeElement starts at ZEND_LONG_MIN for an empty table; the
	 * first appended element gets index 0. */
1088 	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
1089 		h = 0;
1090 	}
1091 
1092 	if (HT_IS_PACKED(ht)) {
1093 		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
1094 		 && h < ht->nNumUsed) {
1095 			zv = ht->arPacked + h;
1096 			if (Z_TYPE_P(zv) != IS_UNDEF) {
1097 				if (flag & HASH_LOOKUP) {
1098 					return zv;
1099 				}
1100 replace:
1101 				if (flag & HASH_ADD) {
1102 					return NULL;
1103 				}
1104 				if (ht->pDestructor) {
1105 					ht->pDestructor(zv);
1106 				}
1107 				ZVAL_COPY_VALUE(zv, pData);
1108 				return zv;
1109 			} else { /* we have to keep the order :( */
1110 				goto convert_to_hash;
1111 			}
1112 		} else if (EXPECTED(h < ht->nTableSize)) {
1113 add_to_packed:
1114 			zv = ht->arPacked + h;
1115 			/* incremental initialization of empty Buckets */
1116 			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
1117 				if (h > ht->nNumUsed) {
1118 					zval *q = ht->arPacked + ht->nNumUsed;
1119 					while (q != zv) {
1120 						ZVAL_UNDEF(q);
1121 						q++;
1122 					}
1123 				}
1124 			}
1125 			ht->nNextFreeElement = ht->nNumUsed = h + 1;
1126 			ht->nNumOfElements++;
1127 			if (flag & HASH_LOOKUP) {
1128 				ZVAL_NULL(zv);
1129 			} else {
1130 				ZVAL_COPY_VALUE(zv, pData);
1131 			}
1132 
1133 			return zv;
		/* Stay packed only while the index is at most double the current
		 * table size and the table is at least half full; otherwise the
		 * packed array would be too sparse. */
1134 		} else if ((h >> 1) < ht->nTableSize &&
1135 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
1136 			zend_hash_packed_grow(ht);
1137 			goto add_to_packed;
1138 		} else {
1139 			if (ht->nNumUsed >= ht->nTableSize) {
1140 				ht->nTableSize += ht->nTableSize;
1141 			}
1142 convert_to_hash:
1143 			zend_hash_packed_to_hash(ht);
1144 		}
1145 	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
1146 		if (h < ht->nTableSize) {
1147 			zend_hash_real_init_packed_ex(ht);
1148 			goto add_to_packed;
1149 		}
1150 		zend_hash_real_init_mixed(ht);
1151 	} else {
		/* In debug builds the bucket is searched even for HASH_ADD_NEW so
		 * the "key must be absent" contract can be asserted. */
1152 		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
1153 			p = zend_hash_index_find_bucket(ht, h);
1154 			if (p) {
1155 				if (flag & HASH_LOOKUP) {
1156 					return &p->val;
1157 				}
1158 				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
1159 				zv = &p->val;
1160 				goto replace;
1161 			}
1162 		}
1163 		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
1164 	}
1165 
	/* Hashed-table append: new bucket with a NULL key (integer key in p->h),
	 * linked at the head of its collision chain. */
1166 	idx = ht->nNumUsed++;
1167 	nIndex = h | ht->nTableMask;
1168 	p = ht->arData + idx;
1169 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1170 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
1171 	if ((zend_long)h >= ht->nNextFreeElement) {
1172 		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
1173 	}
1174 	ht->nNumOfElements++;
1175 	p->h = h;
1176 	p->key = NULL;
1177 	if (flag & HASH_LOOKUP) {
1178 		ZVAL_NULL(&p->val);
1179 	} else {
1180 		ZVAL_COPY_VALUE(&p->val, pData);
1181 	}
1182 
1183 	return &p->val;
1184 }
1185 
/* Public dispatcher for integer-keyed operations: routes `flag` to the
 * matching specialized entry point. For the *_NEXT variants the caller
 * must pass h == ht->nNextFreeElement (asserted). */
zend_hash_index_add_or_update(HashTable * ht,zend_ulong h,zval * pData,uint32_t flag)1186 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1187 {
1188 	if (flag == HASH_ADD) {
1189 		return zend_hash_index_add(ht, h, pData);
1190 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1191 		return zend_hash_index_add_new(ht, h, pData);
1192 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1193 		ZEND_ASSERT(h == ht->nNextFreeElement);
1194 		return zend_hash_next_index_insert(ht, pData);
1195 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1196 		ZEND_ASSERT(h == ht->nNextFreeElement);
1197 		return zend_hash_next_index_insert_new(ht, pData);
1198 	} else {
1199 		ZEND_ASSERT(flag == HASH_UPDATE);
1200 		return zend_hash_index_update(ht, h, pData);
1201 	}
1202 }
1203 
/* Insert `pData` under integer key h; returns NULL if the key exists. */
zend_hash_index_add(HashTable * ht,zend_ulong h,zval * pData)1204 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
1205 {
1206 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
1207 }
1208 
/* Fast-path integer-key insert; caller guarantees h is not in the table. */
zend_hash_index_add_new(HashTable * ht,zend_ulong h,zval * pData)1209 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
1210 {
1211 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
1212 }
1213 
/* Insert or overwrite the entry under integer key h. */
zend_hash_index_update(HashTable * ht,zend_ulong h,zval * pData)1214 ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
1215 {
1216 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
1217 }
1218 
/* Append `pData` at the next free integer index (like PHP's $arr[] = ...). */
zend_hash_next_index_insert(HashTable * ht,zval * pData)1219 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
1220 {
1221 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
1222 }
1223 
/* Append at the next free integer index, skipping the existence check
 * (the next-free index can never collide with an existing key). */
zend_hash_next_index_insert_new(HashTable * ht,zval * pData)1224 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
1225 {
1226 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
1227 }
1228 
/* Find the slot for integer key h, inserting an IS_NULL placeholder if
 * absent; always returns a valid zval pointer. */
zend_hash_index_lookup(HashTable * ht,zend_ulong h)1229 ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
1230 {
1231 	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
1232 }
1233 
/* Rename bucket `b` in place to string key `key` without moving it in
 * insertion order: unlink b from its current collision chain, release the
 * old key, then relink it into the chain for the new hash. Returns NULL
 * if `key` already maps to a different bucket; returns &b->val if it
 * already maps to b itself (no-op) or after a successful rename.
 * Only valid on non-packed tables (asserted). */
zend_hash_set_bucket_key(HashTable * ht,Bucket * b,zend_string * key)1234 ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
1235 {
1236 	uint32_t nIndex;
1237 	uint32_t idx, i;
1238 	Bucket *p, *arData;
1239 
1240 	IS_CONSISTENT(ht);
1241 	HT_ASSERT_RC1(ht);
1242 	ZEND_ASSERT(!HT_IS_PACKED(ht));
1243 
1244 	(void)zend_string_hash_val(key);
1245 	p = zend_hash_find_bucket(ht, key);
1246 	if (UNEXPECTED(p)) {
1247 		return (p == b) ? &p->val : NULL;
1248 	}
1249 
1250 	if (!ZSTR_IS_INTERNED(key)) {
1251 		zend_string_addref(key);
1252 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
1253 	}
1254 
1255 	arData = ht->arData;
1256 
1257 	/* del from hash */
1258 	idx = HT_IDX_TO_HASH(b - arData);
1259 	nIndex = b->h | ht->nTableMask;
1260 	i = HT_HASH_EX(arData, nIndex);
1261 	if (i == idx) {
1262 		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
1263 	} else {
		/* Walk the chain to find b's predecessor and bypass b. */
1264 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1265 		while (Z_NEXT(p->val) != idx) {
1266 			i = Z_NEXT(p->val);
1267 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1268 		}
1269 		Z_NEXT(p->val) = Z_NEXT(b->val);
1270 	}
1271 	zend_string_release(b->key);
1272 
1273 	/* add to hash */
1274 	idx = b - arData;
1275 	b->key = key;
1276 	b->h = ZSTR_H(key);
1277 	nIndex = b->h | ht->nTableMask;
1278 	idx = HT_IDX_TO_HASH(idx);
1279 	i = HT_HASH_EX(arData, nIndex);
1280 	if (i == HT_INVALID_IDX || i < idx) {
1281 		Z_NEXT(b->val) = i;
1282 		HT_HASH_EX(arData, nIndex) = idx;
1283 	} else {
		/* Insert in descending idx order so the chain stays sorted the
		 * same way head-insertion of new buckets produces. */
1284 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1285 		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
1286 			i = Z_NEXT(p->val);
1287 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1288 		}
1289 		Z_NEXT(b->val) = Z_NEXT(p->val);
1290 		Z_NEXT(p->val) = idx;
1291 	}
1292 	return &b->val;
1293 }
1294 
/* Make room in a full non-packed table: if enough slots are occupied by
 * deleted (IS_UNDEF) entries, compact in place via rehash; otherwise
 * double the allocation, copy the buckets, and rebuild the hash part.
 * Fatal error when the table already reached HT_MAX_SIZE. */
zend_hash_do_resize(HashTable * ht)1295 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
1296 {
1297 
1298 	IS_CONSISTENT(ht);
1299 	HT_ASSERT_RC1(ht);
1300 
1301 	ZEND_ASSERT(!HT_IS_PACKED(ht));
1302 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
1303 		zend_hash_rehash(ht);
1304 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
1305 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
1306 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
1307 		Bucket *old_buckets = ht->arData;
1308 
1309 		ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
1310 
1311 		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1312 		ht->nTableSize = nSize;
1313 		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
1314 		HT_SET_DATA_ADDR(ht, new_data);
		/* Only the bucket array is copied; the hash slots are rebuilt
		 * from scratch by the rehash below. */
1315 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
1316 		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1317 		zend_hash_rehash(ht);
1318 	} else {
1319 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
1320 	}
1321 }
1322 
/* Rebuild the hash part of a non-packed table from its bucket array,
 * compacting away IS_UNDEF (deleted) buckets in the process. Keeps the
 * internal pointer and any active external iterators pointing at the
 * same logical elements while buckets shift down. */
zend_hash_rehash(HashTable * ht)1323 ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
1324 {
1325 	Bucket *p;
1326 	uint32_t nIndex, i;
1327 
1328 	IS_CONSISTENT(ht);
1329 
1330 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
1331 		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1332 			ht->nNumUsed = 0;
1333 			HT_HASH_RESET(ht);
1334 		}
1335 		return;
1336 	}
1337 
1338 	HT_HASH_RESET(ht);
1339 	i = 0;
1340 	p = ht->arData;
1341 	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Fast path: every bucket is live; just relink the chains. */
1342 		do {
1343 			nIndex = p->h | ht->nTableMask;
1344 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1345 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1346 			p++;
1347 		} while (++i < ht->nNumUsed);
1348 	} else {
1349 		uint32_t old_num_used = ht->nNumUsed;
1350 		do {
1351 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* First hole found at index i: from here on, copy each
				 * live bucket p (read index i) down to q (write index j). */
1352 				uint32_t j = i;
1353 				Bucket *q = p;
1354 
1355 				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
1356 					while (++i < ht->nNumUsed) {
1357 						p++;
1358 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1359 							ZVAL_COPY_VALUE(&q->val, &p->val);
1360 							q->h = p->h;
1361 							nIndex = q->h | ht->nTableMask;
1362 							q->key = p->key;
1363 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1364 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1365 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1366 								ht->nInternalPointer = j;
1367 							}
1368 							q++;
1369 							j++;
1370 						}
1371 					}
1372 				} else {
					/* Same compaction, but external iterators positioned at
					 * a moved bucket must be retargeted from i to j. */
1373 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1);
1374 
1375 					while (++i < ht->nNumUsed) {
1376 						p++;
1377 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1378 							ZVAL_COPY_VALUE(&q->val, &p->val);
1379 							q->h = p->h;
1380 							nIndex = q->h | ht->nTableMask;
1381 							q->key = p->key;
1382 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1383 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1384 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1385 								ht->nInternalPointer = j;
1386 							}
1387 							if (UNEXPECTED(i >= iter_pos)) {
1388 								do {
1389 									zend_hash_iterators_update(ht, iter_pos, j);
1390 									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
1391 								} while (iter_pos < i);
1392 							}
1393 							q++;
1394 							j++;
1395 						}
1396 					}
1397 				}
1398 				ht->nNumUsed = j;
1399 				break;
1400 			}
1401 			nIndex = p->h | ht->nTableMask;
1402 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1403 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1404 			p++;
1405 		} while (++i < ht->nNumUsed);
1406 
1407 		/* Migrate pointer to one past the end of the array to the new one past the end, so that
1408 		 * newly inserted elements are picked up correctly. */
1409 		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1410 			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
1411 		}
1412 	}
1413 }
1414 
/* Delete the value `zv` (at hash-encoded index `idx`) from a packed table.
 * Advances the internal pointer and any iterators that were parked on the
 * deleted slot to the next live element, trims trailing IS_UNDEF slots
 * when the last used entry is removed, then destroys the value (via a
 * stack copy so the slot is already IS_UNDEF if the destructor re-enters
 * the table). */
_zend_hash_packed_del_val(HashTable * ht,uint32_t idx,zval * zv)1415 static zend_always_inline void _zend_hash_packed_del_val(HashTable *ht, uint32_t idx, zval *zv)
1416 {
1417 	idx = HT_HASH_TO_IDX(idx);
1418 	ht->nNumOfElements--;
1419 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1420 		uint32_t new_idx;
1421 
		/* Find the next live slot after idx (or nNumUsed if none). */
1422 		new_idx = idx;
1423 		while (1) {
1424 			new_idx++;
1425 			if (new_idx >= ht->nNumUsed) {
1426 				break;
1427 			} else if (Z_TYPE(ht->arPacked[new_idx]) != IS_UNDEF) {
1428 				break;
1429 			}
1430 		}
1431 		if (ht->nInternalPointer == idx) {
1432 			ht->nInternalPointer = new_idx;
1433 		}
1434 		zend_hash_iterators_update(ht, idx, new_idx);
1435 	}
1436 	if (ht->nNumUsed - 1 == idx) {
1437 		do {
1438 			ht->nNumUsed--;
1439 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arPacked[ht->nNumUsed-1]) == IS_UNDEF)));
1440 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1441 	}
1442 	if (ht->pDestructor) {
1443 		zval tmp;
1444 		ZVAL_COPY_VALUE(&tmp, zv);
1445 		ZVAL_UNDEF(zv);
1446 		ht->pDestructor(&tmp);
1447 	} else {
1448 		ZVAL_UNDEF(zv);
1449 	}
1450 }
1451 
/* Core deletion for a hashed (non-packed) table: unlink bucket `p`
 * (hash-encoded index `idx`) from its collision chain — `prev` is the
 * preceding bucket in the chain, or NULL if p is the chain head — then
 * apply the same bookkeeping as the packed variant: move the internal
 * pointer/iterators off the dead slot, trim trailing IS_UNDEF buckets,
 * and destroy the value after marking the slot IS_UNDEF. */
_zend_hash_del_el_ex(HashTable * ht,uint32_t idx,Bucket * p,Bucket * prev)1452 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
1453 {
1454 	if (prev) {
1455 		Z_NEXT(prev->val) = Z_NEXT(p->val);
1456 	} else {
1457 		HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
1458 	}
1459 	idx = HT_HASH_TO_IDX(idx);
1460 	ht->nNumOfElements--;
1461 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1462 		uint32_t new_idx;
1463 
1464 		new_idx = idx;
1465 		while (1) {
1466 			new_idx++;
1467 			if (new_idx >= ht->nNumUsed) {
1468 				break;
1469 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1470 				break;
1471 			}
1472 		}
1473 		if (ht->nInternalPointer == idx) {
1474 			ht->nInternalPointer = new_idx;
1475 		}
1476 		zend_hash_iterators_update(ht, idx, new_idx);
1477 	}
1478 	if (ht->nNumUsed - 1 == idx) {
1479 		do {
1480 			ht->nNumUsed--;
1481 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
1482 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1483 	}
	/* Destroy via a stack copy so the bucket is already IS_UNDEF if the
	 * destructor re-enters this table. */
1484 	if (ht->pDestructor) {
1485 		zval tmp;
1486 		ZVAL_COPY_VALUE(&tmp, &p->val);
1487 		ZVAL_UNDEF(&p->val);
1488 		ht->pDestructor(&tmp);
1489 	} else {
1490 		ZVAL_UNDEF(&p->val);
1491 	}
1492 }
1493 
/* Delete bucket `p` when its chain predecessor is unknown: walk the
 * collision chain to locate it, release the string key (if any), then
 * delegate the actual unlink/cleanup to _zend_hash_del_el_ex(). */
_zend_hash_del_el(HashTable * ht,uint32_t idx,Bucket * p)1494 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1495 {
1496 	Bucket *prev = NULL;
1497 	uint32_t nIndex;
1498 	uint32_t i;
1499 
1500 	nIndex = p->h | ht->nTableMask;
1501 	i = HT_HASH(ht, nIndex);
1502 
1503 	if (i != idx) {
1504 		prev = HT_HASH_TO_BUCKET(ht, i);
1505 		while (Z_NEXT(prev->val) != idx) {
1506 			i = Z_NEXT(prev->val);
1507 			prev = HT_HASH_TO_BUCKET(ht, i);
1508 		}
1509 	}
1510 
1511 	if (p->key) {
1512 		zend_string_release(p->key);
1513 		p->key = NULL;
1514 	}
1515 	_zend_hash_del_el_ex(ht, idx, p, prev);
1516 }
1517 
/* Delete the slot `zv`, which must point into ht->arPacked of a packed
 * table (asserted); index is recovered from the pointer offset. */
zend_hash_packed_del_val(HashTable * ht,zval * zv)1518 ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv)
1519 {
1520 	IS_CONSISTENT(ht);
1521 	HT_ASSERT_RC1(ht);
1522 	ZEND_ASSERT(HT_IS_PACKED(ht));
1523 	_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(zv - ht->arPacked), zv);
1524 }
1525 
1526 
/* Delete bucket `p`, which must point into ht->arData of a non-packed
 * table (asserted); index is recovered from the pointer offset. */
zend_hash_del_bucket(HashTable * ht,Bucket * p)1527 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1528 {
1529 	IS_CONSISTENT(ht);
1530 	HT_ASSERT_RC1(ht);
1531 	ZEND_ASSERT(!HT_IS_PACKED(ht));
1532 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1533 }
1534 
/* Remove the entry with string key `key`. Walks the collision chain
 * tracking the predecessor so the unlink needs no second pass. Matches by
 * pointer identity first (fast path for interned/shared keys), then by
 * hash + content. Returns SUCCESS if an entry was removed, else FAILURE. */
zend_hash_del(HashTable * ht,zend_string * key)1535 ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1536 {
1537 	zend_ulong h;
1538 	uint32_t nIndex;
1539 	uint32_t idx;
1540 	Bucket *p;
1541 	Bucket *prev = NULL;
1542 
1543 	IS_CONSISTENT(ht);
1544 	HT_ASSERT_RC1(ht);
1545 
1546 	h = zend_string_hash_val(key);
1547 	nIndex = h | ht->nTableMask;
1548 
1549 	idx = HT_HASH(ht, nIndex);
1550 	while (idx != HT_INVALID_IDX) {
1551 		p = HT_HASH_TO_BUCKET(ht, idx);
1552 		if ((p->key == key) ||
1553 			(p->h == h &&
1554 		     p->key &&
1555 		     zend_string_equal_content(p->key, key))) {
1556 			zend_string_release(p->key);
1557 			p->key = NULL;
1558 			_zend_hash_del_el_ex(ht, idx, p, prev);
1559 			return SUCCESS;
1560 		}
1561 		prev = p;
1562 		idx = Z_NEXT(p->val);
1563 	}
1564 	return FAILURE;
1565 }
1566 
/* Indirect-aware delete by string key (symbol-table semantics): if the
 * stored value is IS_INDIRECT, the indirection target is destroyed and
 * set to IS_UNDEF while the bucket itself stays in place (the table is
 * flagged HASH_FLAG_HAS_EMPTY_IND); an already-IS_UNDEF target reports
 * FAILURE. Plain values are removed like zend_hash_del(). */
zend_hash_del_ind(HashTable * ht,zend_string * key)1567 ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1568 {
1569 	zend_ulong h;
1570 	uint32_t nIndex;
1571 	uint32_t idx;
1572 	Bucket *p;
1573 	Bucket *prev = NULL;
1574 
1575 	IS_CONSISTENT(ht);
1576 	HT_ASSERT_RC1(ht);
1577 
1578 	h = zend_string_hash_val(key);
1579 	nIndex = h | ht->nTableMask;
1580 
1581 	idx = HT_HASH(ht, nIndex);
1582 	while (idx != HT_INVALID_IDX) {
1583 		p = HT_HASH_TO_BUCKET(ht, idx);
1584 		if ((p->key == key) ||
1585 			(p->h == h &&
1586 		     p->key &&
1587 		     zend_string_equal_content(p->key, key))) {
1588 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1589 				zval *data = Z_INDIRECT(p->val);
1590 
1591 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1592 					return FAILURE;
1593 				} else {
					/* Destroy via a stack copy so the slot reads IS_UNDEF
					 * during a possibly re-entrant destructor. */
1594 					if (ht->pDestructor) {
1595 						zval tmp;
1596 						ZVAL_COPY_VALUE(&tmp, data);
1597 						ZVAL_UNDEF(data);
1598 						ht->pDestructor(&tmp);
1599 					} else {
1600 						ZVAL_UNDEF(data);
1601 					}
1602 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1603 				}
1604 			} else {
1605 				zend_string_release(p->key);
1606 				p->key = NULL;
1607 				_zend_hash_del_el_ex(ht, idx, p, prev);
1608 			}
1609 			return SUCCESS;
1610 		}
1611 		prev = p;
1612 		idx = Z_NEXT(p->val);
1613 	}
1614 	return FAILURE;
1615 }
1616 
/* C-string variant of zend_hash_del_ind(): indirect targets are destroyed
 * in place and the bucket kept (table flagged HASH_FLAG_HAS_EMPTY_IND);
 * plain values are fully removed. Returns SUCCESS/FAILURE. */
zend_hash_str_del_ind(HashTable * ht,const char * str,size_t len)1617 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1618 {
1619 	zend_ulong h;
1620 	uint32_t nIndex;
1621 	uint32_t idx;
1622 	Bucket *p;
1623 	Bucket *prev = NULL;
1624 
1625 	IS_CONSISTENT(ht);
1626 	HT_ASSERT_RC1(ht);
1627 
1628 	h = zend_inline_hash_func(str, len);
1629 	nIndex = h | ht->nTableMask;
1630 
1631 	idx = HT_HASH(ht, nIndex);
1632 	while (idx != HT_INVALID_IDX) {
1633 		p = HT_HASH_TO_BUCKET(ht, idx);
1634 		if ((p->h == h)
1635 			 && p->key
1636 			 && zend_string_equals_cstr(p->key, str, len)) {
1637 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1638 				zval *data = Z_INDIRECT(p->val);
1639 
1640 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1641 					return FAILURE;
1642 				} else {
1643 					if (ht->pDestructor) {
1644 						ht->pDestructor(data);
1645 					}
1646 					ZVAL_UNDEF(data);
1647 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1648 				}
1649 			} else {
1650 				zend_string_release(p->key);
1651 				p->key = NULL;
1652 				_zend_hash_del_el_ex(ht, idx, p, prev);
1653 			}
1654 			return SUCCESS;
1655 		}
1656 		prev = p;
1657 		idx = Z_NEXT(p->val);
1658 	}
1659 	return FAILURE;
1660 }
1661 
/* Remove the entry keyed by the C string str/len. Hash is computed here;
 * match is by hash + key content. Returns SUCCESS if removed, else FAILURE. */
zend_hash_str_del(HashTable * ht,const char * str,size_t len)1662 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1663 {
1664 	zend_ulong h;
1665 	uint32_t nIndex;
1666 	uint32_t idx;
1667 	Bucket *p;
1668 	Bucket *prev = NULL;
1669 
1670 	IS_CONSISTENT(ht);
1671 	HT_ASSERT_RC1(ht);
1672 
1673 	h = zend_inline_hash_func(str, len);
1674 	nIndex = h | ht->nTableMask;
1675 
1676 	idx = HT_HASH(ht, nIndex);
1677 	while (idx != HT_INVALID_IDX) {
1678 		p = HT_HASH_TO_BUCKET(ht, idx);
1679 		if ((p->h == h)
1680 			 && p->key
1681 			 && zend_string_equals_cstr(p->key, str, len)) {
1682 			zend_string_release(p->key);
1683 			p->key = NULL;
1684 			_zend_hash_del_el_ex(ht, idx, p, prev);
1685 			return SUCCESS;
1686 		}
1687 		prev = p;
1688 		idx = Z_NEXT(p->val);
1689 	}
1690 	return FAILURE;
1691 }
1692 
/* Remove the entry with integer key h. Packed tables delete by direct
 * index (a live slot at position h); hashed tables search the collision
 * chain for a bucket with matching h and no string key. Returns
 * SUCCESS if removed, else FAILURE. */
zend_hash_index_del(HashTable * ht,zend_ulong h)1693 ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1694 {
1695 	uint32_t nIndex;
1696 	uint32_t idx;
1697 	Bucket *p;
1698 	Bucket *prev = NULL;
1699 
1700 	IS_CONSISTENT(ht);
1701 	HT_ASSERT_RC1(ht);
1702 
1703 	if (HT_IS_PACKED(ht)) {
1704 		if (h < ht->nNumUsed) {
1705 			zval *zv = ht->arPacked + h;
1706 			if (Z_TYPE_P(zv) != IS_UNDEF) {
1707 				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(h), zv);
1708 				return SUCCESS;
1709 			}
1710 		}
1711 		return FAILURE;
1712 	}
1713 	nIndex = h | ht->nTableMask;
1714 
1715 	idx = HT_HASH(ht, nIndex);
1716 	while (idx != HT_INVALID_IDX) {
1717 		p = HT_HASH_TO_BUCKET(ht, idx);
1718 		if ((p->h == h) && (p->key == NULL)) {
1719 			_zend_hash_del_el_ex(ht, idx, p, prev);
1720 			return SUCCESS;
1721 		}
1722 		prev = p;
1723 		idx = Z_NEXT(p->val);
1724 	}
1725 	return FAILURE;
1726 }
1727 
/* Destroy all elements of the table and free its data block (but not the
 * HashTable struct itself). Branches on packed vs hashed layout, presence
 * of a destructor, static-keys-only, and without-holes so each loop does
 * the minimum work. SET_INCONSISTENT brackets the destructor loops so
 * debug builds detect re-entrant use while destruction is in progress.
 * An uninitialized table has no data block to free and returns early. */
zend_hash_destroy(HashTable * ht)1728 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1729 {
1730 	IS_CONSISTENT(ht);
1731 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1732 
1733 	if (ht->nNumUsed) {
1734 		if (HT_IS_PACKED(ht)) {
1735 			if (ht->pDestructor) {
1736 				zval *zv = ht->arPacked;
1737 				zval *end = zv + ht->nNumUsed;
1738 
1739 				SET_INCONSISTENT(HT_IS_DESTROYING);
1740 				if (HT_IS_WITHOUT_HOLES(ht)) {
1741 					do {
1742 						ht->pDestructor(zv);
1743 					} while (++zv != end);
1744 				} else {
1745 					do {
1746 						if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
1747 							ht->pDestructor(zv);
1748 						}
1749 					} while (++zv != end);
1750 				}
1751 				SET_INCONSISTENT(HT_DESTROYED);
1752 			}
1753 			zend_hash_iterators_remove(ht);
1754 		} else {
1755 			Bucket *p = ht->arData;
1756 			Bucket *end = p + ht->nNumUsed;
1757 
1758 			if (ht->pDestructor) {
1759 				SET_INCONSISTENT(HT_IS_DESTROYING);
1760 
				/* Static-keys-only tables skip key releases entirely. */
1761 				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1762 					if (HT_IS_WITHOUT_HOLES(ht)) {
1763 						do {
1764 							ht->pDestructor(&p->val);
1765 						} while (++p != end);
1766 					} else {
1767 						do {
1768 							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1769 								ht->pDestructor(&p->val);
1770 							}
1771 						} while (++p != end);
1772 					}
1773 				} else if (HT_IS_WITHOUT_HOLES(ht)) {
1774 					do {
1775 						ht->pDestructor(&p->val);
1776 						if (EXPECTED(p->key)) {
1777 							zend_string_release(p->key);
1778 						}
1779 					} while (++p != end);
1780 				} else {
1781 					do {
1782 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1783 							ht->pDestructor(&p->val);
1784 							if (EXPECTED(p->key)) {
1785 								zend_string_release(p->key);
1786 							}
1787 						}
1788 					} while (++p != end);
1789 				}
1790 
1791 				SET_INCONSISTENT(HT_DESTROYED);
1792 			} else {
				/* No destructor: only key strings may need releasing. */
1793 				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1794 					do {
1795 						if (EXPECTED(p->key)) {
1796 							zend_string_release(p->key);
1797 						}
1798 					} while (++p != end);
1799 				}
1800 			}
1801 			zend_hash_iterators_remove(ht);
1802 		}
1803 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1804 		return;
1805 	}
1806 	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1807 }
1808 
/* Destroy a PHP array (a GC-managed, non-persistent HashTable) and free
 * the HashTable struct itself. The GC header is neutralized first to
 * break possible reference cycles. Values are destroyed with the inlined
 * i_zval_ptr_dtor fast path; tables whose destructor was overridden away
 * from ZVAL_PTR_DTOR fall back to the generic zend_hash_destroy(). */
zend_array_destroy(HashTable * ht)1809 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1810 {
1811 	IS_CONSISTENT(ht);
1812 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1813 
1814 	/* break possible cycles */
1815 	GC_REMOVE_FROM_BUFFER(ht);
1816 	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;
1817 
1818 	if (ht->nNumUsed) {
1819 		/* In some rare cases destructors of regular arrays may be changed */
1820 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1821 			zend_hash_destroy(ht);
1822 			goto free_ht;
1823 		}
1824 
1825 		SET_INCONSISTENT(HT_IS_DESTROYING);
1826 
1827 		if (HT_IS_PACKED(ht)) {
1828 			zval *zv = ht->arPacked;
1829 			zval *end = zv + ht->nNumUsed;
1830 
1831 			do {
1832 				i_zval_ptr_dtor(zv);
1833 			} while (++zv != end);
1834 		} else {
1835 			Bucket *p = ht->arData;
1836 			Bucket *end = p + ht->nNumUsed;
1837 
1838 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1839 				do {
1840 					i_zval_ptr_dtor(&p->val);
1841 				} while (++p != end);
1842 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1843 				do {
1844 					i_zval_ptr_dtor(&p->val);
1845 					if (EXPECTED(p->key)) {
						/* Arrays are never persistent, hence release_ex(.., 0). */
1846 						zend_string_release_ex(p->key, 0);
1847 					}
1848 				} while (++p != end);
1849 			} else {
1850 				do {
1851 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1852 						i_zval_ptr_dtor(&p->val);
1853 						if (EXPECTED(p->key)) {
1854 							zend_string_release_ex(p->key, 0);
1855 						}
1856 					}
1857 				} while (++p != end);
1858 			}
1859 		}
1860 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1861 		goto free_ht;
1862 	}
1863 	SET_INCONSISTENT(HT_DESTROYED);
1864 	efree(HT_GET_DATA_ADDR(ht));
1865 free_ht:
1866 	zend_hash_iterators_remove(ht);
1867 	FREE_HASHTABLE(ht);
1868 }
1869 
/* Remove all elements but keep the table allocated and usable: destroy
 * values (and release non-static string keys in the hashed case), reset
 * the hash slots, and reset the element counters and nNextFreeElement.
 * Loop variants are specialized on destructor presence, static-keys-only,
 * and without-holes, mirroring zend_hash_destroy(). */
zend_hash_clean(HashTable * ht)1870 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1871 {
1872 	IS_CONSISTENT(ht);
1873 	HT_ASSERT_RC1(ht);
1874 
1875 	if (ht->nNumUsed) {
1876 		if (HT_IS_PACKED(ht)) {
1877 			zval *zv = ht->arPacked;
1878 			zval *end = zv + ht->nNumUsed;
1879 
1880 			if (ht->pDestructor) {
1881 				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1882 					if (HT_IS_WITHOUT_HOLES(ht)) {
1883 						do {
1884 							ht->pDestructor(zv);
1885 						} while (++zv != end);
1886 					} else {
1887 						do {
1888 							if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
1889 								ht->pDestructor(zv);
1890 							}
1891 						} while (++zv != end);
1892 					}
1893 				}
1894 			}
1895 		} else {
1896 			Bucket *p = ht->arData;
1897 			Bucket *end = p + ht->nNumUsed;
1898 
1899 			if (ht->pDestructor) {
1900 				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1901 					if (HT_IS_WITHOUT_HOLES(ht)) {
1902 						do {
1903 							ht->pDestructor(&p->val);
1904 						} while (++p != end);
1905 					} else {
1906 						do {
1907 							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1908 								ht->pDestructor(&p->val);
1909 							}
1910 						} while (++p != end);
1911 					}
1912 				} else if (HT_IS_WITHOUT_HOLES(ht)) {
1913 					do {
1914 						ht->pDestructor(&p->val);
1915 						if (EXPECTED(p->key)) {
1916 							zend_string_release(p->key);
1917 						}
1918 					} while (++p != end);
1919 				} else {
1920 					do {
1921 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1922 							ht->pDestructor(&p->val);
1923 							if (EXPECTED(p->key)) {
1924 								zend_string_release(p->key);
1925 							}
1926 						}
1927 					} while (++p != end);
1928 				}
1929 			} else {
1930 				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1931 					do {
1932 						if (EXPECTED(p->key)) {
1933 							zend_string_release(p->key);
1934 						}
1935 					} while (++p != end);
1936 				}
1937 			}
			/* Only hashed tables have hash slots to reset. */
1938 			HT_HASH_RESET(ht);
1939 		}
1940 	}
1941 	ht->nNumUsed = 0;
1942 	ht->nNumOfElements = 0;
	/* ZEND_LONG_MIN marks "no next index yet"; the insert path maps it to 0. */
1943 	ht->nNextFreeElement = ZEND_LONG_MIN;
1944 	ht->nInternalPointer = 0;
1945 }
1946 
/* Clean a symbol table (always a real hash, never packed): destroy all live
 * values with i_zval_ptr_dtor, release string keys, and reset the table to
 * empty while keeping its storage. */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		ZEND_ASSERT(!HT_IS_PACKED(ht));
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			/* no string keys to release */
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			/* holes present: skip IS_UNDEF slots */
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN; /* reset auto-increment key state */
	ht->nInternalPointer = 0;
}
1986 
/* Destroy the table by deleting elements one at a time (front to back)
 * through the regular deletion helpers, so bookkeeping stays consistent
 * while each element's destructor runs; finally free the storage. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked;

		for (idx = 0; idx < ht->nNumUsed; idx++, zv++) {
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData;

		for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}
	/* uninitialized tables share a static bucket array — nothing to free */
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2015 
/* Like zend_hash_graceful_destroy(), but elements are deleted in reverse
 * insertion order (back to front). */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			zv--;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			p--;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}

	/* uninitialized tables share a static bucket array — nothing to free */
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2050 
2051 /* This is used to recurse elements and selectively delete certain entries
2052  * from a hashtable. apply_func() receives the data and decides if the entry
2053  * should be deleted or recursion should be stopped. The following three
2054  * return codes are possible:
2055  * ZEND_HASH_APPLY_KEEP   - continue
2056  * ZEND_HASH_APPLY_STOP   - stop iteration
2057  * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
2058  */
2059 
/* Walk all live elements, invoking apply_func on each value.  The result is
 * a bit mask: ZEND_HASH_APPLY_REMOVE deletes the current element (the table
 * must then not be shared, hence the RC1 assert) and ZEND_HASH_APPLY_STOP
 * ends the iteration; the bits may be combined. */
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2098 
2099 
/* Same as zend_hash_apply(), but forwards one opaque extra argument to the
 * callback.  See zend_hash_apply() for the meaning of the result bits. */
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			result = apply_func(zv, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			result = apply_func(&p->val, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2136 
2137 
/* Variadic variant of zend_hash_apply(): the callback receives the element's
 * key (as a zend_hash_key) plus the trailing varargs.  va_start/va_end are
 * re-done for every element so each callback invocation gets a freshly
 * initialized va_list. */
ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	uint32_t idx;
	va_list args;
	zend_hash_key hash_key;
	int result;

	IS_CONSISTENT(ht);

	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			va_start(args, num_args);
			/* packed table: the numeric key is the position itself */
			hash_key.h = idx;
			hash_key.key = NULL;

			result = apply_func(zv, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			va_start(args, num_args);
			hash_key.h = p->h;
			hash_key.key = p->key;

			result = apply_func(&p->val, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	}
}
2191 
2192 
/* Like zend_hash_apply(), but iterates from the last used slot down to the
 * first.  See zend_hash_apply() for the meaning of the result bits. */
ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv;

		while (idx > 0) {
			idx--;
			zv = ht->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;

			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		Bucket *p;

		while (idx > 0) {
			idx--;
			p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;

			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2239 
2240 
/* Copy every live element of source into target, overwriting entries with
 * the same key.  Packed sources keep their positional indexes.  INDIRECT
 * values are dereferenced first, and skipped if they point to UNDEF slots.
 * pCopyConstructor (optional) is invoked on each stored zval. */
ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
{
	uint32_t idx;
	zval *new_entry, *data;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (HT_IS_PACKED(source)) {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			zval *zv = source->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;

			new_entry = zend_hash_index_update(target, idx, zv);
			if (pCopyConstructor) {
				pCopyConstructor(new_entry);
			}
		}
		return;
	}

	for (idx = 0; idx < source->nNumUsed; idx++) {
		Bucket *p = source->arData + idx;

		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;

		/* INDIRECT element may point to UNDEF-ined slots */
		data = &p->val;
		if (Z_TYPE_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
				continue;
			}
		}
		if (p->key) {
			new_entry = zend_hash_update(target, p->key, data);
		} else {
			new_entry = zend_hash_index_update(target, p->h, data);
		}
		if (pCopyConstructor) {
			pCopyConstructor(new_entry);
		}
	}
}
2286 
2287 
/* Copy one value from source into dest during array duplication.
 * Returns 0 when the slot must be skipped (UNDEF, or an INDIRECT pointing
 * at an UNDEF slot), 1 when dest was written.  Refcounted values get an
 * addref; references held only once are unwrapped to their inner value,
 * except when that inner value is the source array itself. */
static zend_always_inline bool zend_array_dup_value(HashTable *source, HashTable *target, zval *data, zval *dest, bool packed, bool with_holes)
{
	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			/* unwrap a reference with refcount 1, unless it wraps the
			 * source array itself */
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break;
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(dest, data);

	return 1;
}
2324 
/* Duplicate one bucket (value plus key/hash) from p into q.  Returns 0 when
 * the element is skipped (see zend_array_dup_value()).  For non-packed
 * targets the key is copied (addref'd unless all keys are static/interned)
 * and the bucket is linked into target's collision chain at position idx. */
static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
{
	if (!zend_array_dup_value(source, target, &p->val, &q->val, packed, with_holes)) {
		return 0;
	}

	if (!packed) {
		uint32_t nIndex;

		q->h = p->h;
		q->key = p->key;
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}

		/* prepend the new bucket to its hash chain in the target */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
2346 
/* We need to duplicate iterators to be able to search through all
 * copy-on-write copies to find the actually iterated HashTable and position
 * back.  Each iterator attached to source gets a copy attached to target,
 * linked into the original iterator's next_copy chain. */
static void zend_array_dup_ht_iterators(HashTable *source, HashTable *target) {
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end  = iter + EG(ht_iterators_used);

	while (iter != end) {
		if (iter->ht == source) {
			uint32_t copy_idx = zend_hash_iterator_add(target, iter->pos);
			/* zend_hash_iterator_add may reallocate EG(ht_iterators) */
			HashTableIterator *copy_iter = EG(ht_iterators) + copy_idx;
			copy_iter->next_copy = iter->next_copy;
			iter->next_copy = copy_idx;
		}
		iter++;
	}
}
2362 
/* Duplicate all slots of a packed source into a packed target, preserving
 * positions; holes (when with_holes) are reproduced as UNDEF slots.
 * Iterators attached to the source are duplicated as well. */
static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
{
	zval *p = source->arPacked;
	zval *q = target->arPacked;
	zval *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_value(source, target, p, q, 1, with_holes)) {
			if (with_holes) {
				ZVAL_UNDEF(q);
			}
		}
		p++; q++;
	} while (p != end);

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}
}
2382 
/* Duplicate all buckets of a hashed source into target, rebuilding the hash
 * chains.  Returns the number of buckets written.  The fast path copies 1:1;
 * as soon as one element is skipped, the remainder is compacted (target
 * positions fall behind source positions), remapping nInternalPointer and —
 * when the target has iterators — their positions from source to target
 * indexes. */
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			/* first skipped element: continue in compacting mode */
			uint32_t target_idx = idx;

			idx++; p++;
			if (EXPECTED(!HT_HAS_ITERATORS(target))) {
				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							target->nInternalPointer = target_idx;
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			} else {
				target->nNumUsed = source->nNumUsed;
				uint32_t iter_pos = zend_hash_iterators_lower_pos(target, idx);

				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							target->nInternalPointer = target_idx;
						}
						/* move iterators from source position idx to the
						 * compacted target position */
						if (UNEXPECTED(idx >= iter_pos)) {
							do {
								zend_hash_iterators_update(target, iter_pos, target_idx);
								iter_pos = zend_hash_iterators_lower_pos(target, iter_pos + 1);
							} while (iter_pos < idx);
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx;
}
2435 
/* Create an independent copy of an array (refcount 1).  Four cases:
 * empty source, immutable source (raw memcpy of the data block), packed
 * source, and hashed source (per-bucket duplication via the helpers
 * above). */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = GC_ARRAY;

	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* empty: share the static uninitialized bucket array */
		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		target->nTableSize = HT_MIN_SIZE;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* immutable: contents cannot hold refcounted values needing
		 * per-element handling, so a flat memcpy suffices */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		if (HT_IS_PACKED(source)) {
			HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_PACKED_USED_SIZE(source));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
		}
	} else if (HT_IS_PACKED(source)) {
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		/* clamp a stale internal pointer back to the start */
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		/* specialize the copy loop on the static-keys / holes flags */
		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		/* duplication may have compacted holes away */
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
2523 
/* Build a new packed array holding source's values in iteration order,
 * discarding keys.  References held only once are unwrapped; every stored
 * value gets an addref. */
ZEND_API HashTable* zend_array_to_list(HashTable *source)
{
	HashTable *result = _zend_new_array(zend_hash_num_elements(source));
	zend_hash_real_init_packed(result);

	ZEND_HASH_FILL_PACKED(result) {
		zval *entry;

		ZEND_HASH_FOREACH_VAL(source, entry) {
			if (UNEXPECTED(Z_ISREF_P(entry) && Z_REFCOUNT_P(entry) == 1)) {
				entry = Z_REFVAL_P(entry);
			}
			Z_TRY_ADDREF_P(entry);
			ZEND_HASH_FILL_ADD(entry);
		} ZEND_HASH_FOREACH_END();
	} ZEND_HASH_FILL_END();

	return result;
}
2543 
2544 
/* Merge all live elements of source into target.  With overwrite, existing
 * keys in target are replaced (HASH_UPDATE); otherwise existing keys are
 * kept (HASH_ADD) and the copy constructor runs only for entries actually
 * added.  Packed sources merge by positional index; INDIRECT values are
 * dereferenced first and UNDEF values skipped. */
ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
{
	uint32_t idx;
	Bucket *p;
	zval *t, *s;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (overwrite) {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue;
				}
				t = zend_hash_index_update(target, idx, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_update(target, p->h, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	} else {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue;
				}
				t = zend_hash_index_add(target, idx, s);
				/* t is NULL when the index already existed */
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_add(target, p->h, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	}
}
2629 
2630 
zend_hash_replace_checker_wrapper(HashTable * target,zval * source_data,zend_ulong h,zend_string * key,void * pParam,merge_checker_func_t merge_checker_func)2631 static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, zend_ulong h, zend_string *key, void *pParam, merge_checker_func_t merge_checker_func)
2632 {
2633 	zend_hash_key hash_key;
2634 
2635 	hash_key.h = h;
2636 	hash_key.key = key;
2637 	return merge_checker_func(target, source_data, &hash_key, pParam);
2638 }
2639 
2640 
/* Merge source into target, but only for elements approved by pMergeSource
 * (called with the element's key and pParam).  Approved elements overwrite
 * target entries; pCopyConstructor (optional) runs on each stored value.
 * The source must be a real hash (string keys expected). */
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
	uint32_t idx;
	Bucket *p;
	zval *t;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	ZEND_ASSERT(!HT_IS_PACKED(source));
	for (idx = 0; idx < source->nNumUsed; idx++) {
		p = source->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		if (zend_hash_replace_checker_wrapper(target, &p->val, p->h, p->key, pParam, pMergeSource)) {
			t = zend_hash_update(target, p->key, &p->val);
			if (pCopyConstructor) {
				pCopyConstructor(t);
			}
		}
	}
}
2663 
2664 
2665 /* Returns the hash table data if found and NULL if not. */
zend_hash_find(const HashTable * ht,zend_string * key)2666 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2667 {
2668 	Bucket *p;
2669 
2670 	IS_CONSISTENT(ht);
2671 
2672 	(void)zend_string_hash_val(key);
2673 	p = zend_hash_find_bucket(ht, key);
2674 	return p ? &p->val : NULL;
2675 }
2676 
zend_hash_find_known_hash(const HashTable * ht,const zend_string * key)2677 ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, const zend_string *key)
2678 {
2679 	Bucket *p;
2680 
2681 	IS_CONSISTENT(ht);
2682 
2683 	p = zend_hash_find_bucket(ht, key);
2684 	return p ? &p->val : NULL;
2685 }
2686 
zend_hash_str_find(const HashTable * ht,const char * str,size_t len)2687 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2688 {
2689 	zend_ulong h;
2690 	Bucket *p;
2691 
2692 	IS_CONSISTENT(ht);
2693 
2694 	h = zend_inline_hash_func(str, len);
2695 	p = zend_hash_str_find_bucket(ht, str, len, h);
2696 	return p ? &p->val : NULL;
2697 }
2698 
zend_hash_index_find(const HashTable * ht,zend_ulong h)2699 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2700 {
2701 	Bucket *p;
2702 
2703 	IS_CONSISTENT(ht);
2704 
2705 	if (HT_IS_PACKED(ht)) {
2706 		if (h < ht->nNumUsed) {
2707 			zval *zv = ht->arPacked + h;
2708 
2709 			if (Z_TYPE_P(zv) != IS_UNDEF) {
2710 				return zv;
2711 			}
2712 		}
2713 		return NULL;
2714 	}
2715 
2716 	p = zend_hash_index_find_bucket(ht, h);
2717 	return p ? &p->val : NULL;
2718 }
2719 
_zend_hash_index_find(const HashTable * ht,zend_ulong h)2720 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2721 {
2722 	Bucket *p;
2723 
2724 	IS_CONSISTENT(ht);
2725 	ZEND_ASSERT(!HT_IS_PACKED(ht));
2726 
2727 	p = zend_hash_index_find_bucket(ht, h);
2728 	return p ? &p->val : NULL;
2729 }
2730 
/* Reset *pos to the first valid position at or after 0. */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
	IS_CONSISTENT(ht);
	/* writing ht->nInternalPointer of a shared table would be a COW violation */
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
	*pos = _zend_hash_get_valid_pos(ht, 0);
}
2737 
2738 
2739 /* This function will be extremely optimized by remembering
2740  * the end of the list
2741  */
zend_hash_internal_pointer_end_ex(HashTable * ht,HashPosition * pos)2742 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2743 {
2744 	uint32_t idx;
2745 
2746 	IS_CONSISTENT(ht);
2747 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2748 
2749 	idx = ht->nNumUsed;
2750 	if (HT_IS_PACKED(ht)) {
2751 		while (idx > 0) {
2752 			idx--;
2753 			if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2754 				*pos = idx;
2755 				return;
2756 			}
2757 		}
2758 	} else {
2759 		while (idx > 0) {
2760 			idx--;
2761 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2762 				*pos = idx;
2763 				return;
2764 			}
2765 		}
2766 	}
2767 	*pos = ht->nNumUsed;
2768 }
2769 
2770 
/* Advance *pos to the next live element.  Returns FAILURE when the current
 * position is already past the end; otherwise SUCCESS, with *pos set to the
 * next non-UNDEF slot, or to nNumUsed when the end has been reached. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);

	idx = _zend_hash_get_valid_pos(ht, *pos);
	if (idx < ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			while (1) {
				idx++;
				if (idx >= ht->nNumUsed) {
					*pos = ht->nNumUsed;
					return SUCCESS;
				}
				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		} else {
			while (1) {
				idx++;
				if (idx >= ht->nNumUsed) {
					*pos = ht->nNumUsed;
					return SUCCESS;
				}
				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		}
	} else {
		return FAILURE;
	}
}
2809 
/* Move *pos to the previous live element.  Returns FAILURE when *pos is
 * past the end; otherwise SUCCESS, with *pos set to the previous non-UNDEF
 * slot, or to nNumUsed when there is none before the current position. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
{
	uint32_t idx = *pos;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);

	if (idx < ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			while (idx > 0) {
				idx--;
				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		} else {
			while (idx > 0) {
				idx--;
				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
					*pos = idx;
					return SUCCESS;
				}
			}
		}
		*pos = ht->nNumUsed;
		return SUCCESS;
	} else {
		return FAILURE;
	}
}
2841 
2842 
2843 /* This function should be made binary safe  */
zend_hash_get_current_key_ex(const HashTable * ht,zend_string ** str_index,zend_ulong * num_index,const HashPosition * pos)2844 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, const HashPosition *pos)
2845 {
2846 	uint32_t idx;
2847 	Bucket *p;
2848 
2849 	IS_CONSISTENT(ht);
2850 	idx = _zend_hash_get_valid_pos(ht, *pos);
2851 	if (idx < ht->nNumUsed) {
2852 		if (HT_IS_PACKED(ht)) {
2853 			*num_index = idx;
2854 			return HASH_KEY_IS_LONG;
2855 		}
2856 		p = ht->arData + idx;
2857 		if (p->key) {
2858 			*str_index = p->key;
2859 			return HASH_KEY_IS_STRING;
2860 		} else {
2861 			*num_index = p->h;
2862 			return HASH_KEY_IS_LONG;
2863 		}
2864 	}
2865 	return HASH_KEY_NON_EXISTENT;
2866 }
2867 
zend_hash_get_current_key_zval_ex(const HashTable * ht,zval * key,const HashPosition * pos)2868 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, const HashPosition *pos)
2869 {
2870 	uint32_t idx;
2871 	Bucket *p;
2872 
2873 	IS_CONSISTENT(ht);
2874 	idx = _zend_hash_get_valid_pos(ht, *pos);
2875 	if (idx >= ht->nNumUsed) {
2876 		ZVAL_NULL(key);
2877 	} else {
2878 		if (HT_IS_PACKED(ht)) {
2879 			ZVAL_LONG(key, idx);
2880 			return;
2881 		}
2882 		p = ht->arData + idx;
2883 		if (p->key) {
2884 			ZVAL_STR_COPY(key, p->key);
2885 		} else {
2886 			ZVAL_LONG(key, p->h);
2887 		}
2888 	}
2889 }
2890 
zend_hash_get_current_key_type_ex(HashTable * ht,HashPosition * pos)2891 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2892 {
2893 	uint32_t idx;
2894 	Bucket *p;
2895 
2896 	IS_CONSISTENT(ht);
2897 	idx = _zend_hash_get_valid_pos(ht, *pos);
2898 	if (idx < ht->nNumUsed) {
2899 		if (HT_IS_PACKED(ht)) {
2900 			return HASH_KEY_IS_LONG;
2901 		}
2902 		p = ht->arData + idx;
2903 		if (p->key) {
2904 			return HASH_KEY_IS_STRING;
2905 		} else {
2906 			return HASH_KEY_IS_LONG;
2907 		}
2908 	}
2909 	return HASH_KEY_NON_EXISTENT;
2910 }
2911 
2912 
zend_hash_get_current_data_ex(HashTable * ht,HashPosition * pos)2913 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2914 {
2915 	uint32_t idx;
2916 	Bucket *p;
2917 
2918 	IS_CONSISTENT(ht);
2919 	idx = _zend_hash_get_valid_pos(ht, *pos);
2920 	if (idx < ht->nNumUsed) {
2921 		if (HT_IS_PACKED(ht)) {
2922 			return &ht->arPacked[idx];
2923 		}
2924 		p = ht->arData + idx;
2925 		return &p->val;
2926 	} else {
2927 		return NULL;
2928 	}
2929 }
2930 
zend_hash_bucket_swap(Bucket * p,Bucket * q)2931 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2932 {
2933 	zval val;
2934 	zend_ulong h;
2935 	zend_string *key;
2936 
2937 	val = p->val;
2938 	h = p->h;
2939 	key = p->key;
2940 
2941 	p->val = q->val;
2942 	p->h = q->h;
2943 	p->key = q->key;
2944 
2945 	q->val = val;
2946 	q->h = h;
2947 	q->key = key;
2948 }
2949 
zend_hash_bucket_renum_swap(Bucket * p,Bucket * q)2950 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2951 {
2952 	zval val;
2953 
2954 	val = p->val;
2955 	p->val = q->val;
2956 	q->val = val;
2957 }
2958 
zend_hash_bucket_packed_swap(Bucket * p,Bucket * q)2959 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2960 {
2961 	zval val;
2962 	zend_ulong h;
2963 
2964 	val = p->val;
2965 	h = p->h;
2966 
2967 	p->val = q->val;
2968 	p->h = q->h;
2969 
2970 	q->val = val;
2971 	q->h = h;
2972 }
2973 
/* Sort `ht` in place using the supplied sort routine (`sort`) and bucket
 * comparator (`compar`). When `renumber` is true, existing keys are dropped
 * and entries are re-keyed sequentially 0..n-1, converting the table to the
 * packed representation when possible. Requires a non-shared table (RC1). */
zend_hash_sort_ex(HashTable * ht,sort_func_t sort,bucket_compare_func_t compar,bool renumber)2974 ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
2975 {
2976 	Bucket *p;
2977 	uint32_t i, j;
2978 
2979 	IS_CONSISTENT(ht);
2980 	HT_ASSERT_RC1(ht);
2981 
	/* 0 or 1 elements: nothing to sort; still fall through when a renumber
	 * of a single element is requested. */
2982 	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
2983 		/* Doesn't require sorting */
2984 		return;
2985 	}
2986 
	/* Sorting operates on Bucket entries, so a packed table is converted to
	 * the bucket (hash) layout first. */
2987 	if (HT_IS_PACKED(ht)) {
2988 		zend_hash_packed_to_hash(ht); // TODO: ???
2989 	}
2990 
2991 	if (HT_IS_WITHOUT_HOLES(ht)) {
2992 		/* Store original order of elements in extra space to allow stable sorting. */
2993 		for (i = 0; i < ht->nNumUsed; i++) {
2994 			Z_EXTRA(ht->arData[i].val) = i;
2995 		}
2996 	} else {
2997 		/* Remove holes and store original order. */
2998 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2999 			p = ht->arData + j;
3000 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
3001 			if (i != j) {
3002 				ht->arData[i] = *p;
3003 			}
3004 			Z_EXTRA(ht->arData[i].val) = i;
3005 			i++;
3006 		}
3007 		ht->nNumUsed = i;
3008 	}
3009 
3010 	if (!HT_IS_PACKED(ht)) {
3011 		/* We broke the hash collisions chains overriding Z_NEXT() by Z_EXTRA().
3012 		 * Reset the hash headers table as well to avoid possible inconsistent
3013 		 * access on recursive data structures.
3014 	     *
3015 	     * See Zend/tests/bug63882_2.phpt
3016 		 */
3017 		HT_HASH_RESET(ht);
3018 	}
3019 
	/* After the loops above, `i` holds the number of live elements. */
3020 	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
3021 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
3022 				(HT_IS_PACKED(ht) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
3023 
3024 	ht->nInternalPointer = 0;
3025 
3026 	if (renumber) {
		/* Release string keys and assign sequential integer keys. */
3027 		for (j = 0; j < i; j++) {
3028 			p = ht->arData + j;
3029 			p->h = j;
3030 			if (p->key) {
3031 				zend_string_release(p->key);
3032 				p->key = NULL;
3033 			}
3034 		}
3035 
3036 		ht->nNextFreeElement = i;
3037 	}
3038 	if (HT_IS_PACKED(ht)) {
3039 		if (!renumber) {
3040 			zend_hash_packed_to_hash(ht);
3041 		}
3042 	} else {
3043 		if (renumber) {
			/* Renumbered tables become packed: allocate a fresh packed data
			 * block, copy the values in order, and free the old bucket block. */
3044 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
3045 			Bucket *old_buckets = ht->arData;
3046 			zval *zv;
3047 
3048 			new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
3049 			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
3050 			ht->nTableMask = HT_MIN_MASK;
3051 			HT_SET_DATA_ADDR(ht, new_data);
3052 			p = old_buckets;
3053 			zv = ht->arPacked;
3054 			for (i = 0; i < ht->nTableSize; i++) {
3055 				ZVAL_COPY_VALUE(zv, &p->val);
3056 				zv++;
3057 				p++;
3058 			}
3059 			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
3060 			HT_HASH_RESET_PACKED(ht);
3061 		} else {
			/* Keys kept: rebuild the hash chains broken above. */
3062 			zend_hash_rehash(ht);
3063 		}
3064 	}
3065 }
3066 
zend_hash_compare_impl(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3067 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) {
3068 	uint32_t idx1, idx2;
3069 	zend_string *key1, *key2;
3070 	zend_ulong h1, h2;
3071 	zval *pData1, *pData2;;
3072 	int result;
3073 
3074 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
3075 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
3076 	}
3077 
3078 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
3079 		if (HT_IS_PACKED(ht1)) {
3080 			pData1 = ht1->arPacked + idx1;
3081 			h1 = idx1;
3082 			key1 = NULL;
3083 		} else {
3084 			Bucket *p = ht1->arData + idx1;
3085 			pData1 = &p->val;
3086 			h1 = p->h;
3087 			key1 = p->key;
3088 		}
3089 
3090 		if (Z_TYPE_P(pData1) == IS_UNDEF) continue;
3091 		if (ordered) {
3092 			if (HT_IS_PACKED(ht2)) {
3093 				while (1) {
3094 					ZEND_ASSERT(idx2 != ht2->nNumUsed);
3095 					pData2 = ht2->arPacked + idx2;
3096 					h2 = idx2;
3097 					key2 = NULL;
3098 					if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3099 					idx2++;
3100 				}
3101 			} else {
3102 				while (1) {
3103 					Bucket *p;
3104 					ZEND_ASSERT(idx2 != ht2->nNumUsed);
3105 					p = ht2->arData + idx2;
3106 					pData2 = &p->val;
3107 					h2 = p->h;
3108 					key2 = p->key;
3109 					if (Z_TYPE_P(pData2) != IS_UNDEF) break;
3110 					idx2++;
3111 				}
3112 			}
3113 			if (key1 == NULL && key2 == NULL) { /* numeric indices */
3114 				if (h1 != h2) {
3115 					return h1 > h2 ? 1 : -1;
3116 				}
3117 			} else if (key1 != NULL && key2 != NULL) { /* string indices */
3118 				if (ZSTR_LEN(key1) != ZSTR_LEN(key2)) {
3119 					return ZSTR_LEN(key1) > ZSTR_LEN(key2) ? 1 : -1;
3120 				}
3121 
3122 				result = memcmp(ZSTR_VAL(key1), ZSTR_VAL(key2), ZSTR_LEN(key1));
3123 				if (result != 0) {
3124 					return result;
3125 				}
3126 			} else {
3127 				/* Mixed key types: A string key is considered as larger */
3128 				return key1 != NULL ? 1 : -1;
3129 			}
3130 			idx2++;
3131 		} else {
3132 			if (key1 == NULL) { /* numeric index */
3133 				pData2 = zend_hash_index_find(ht2, h1);
3134 				if (pData2 == NULL) {
3135 					return 1;
3136 				}
3137 			} else { /* string index */
3138 				pData2 = zend_hash_find(ht2, key1);
3139 				if (pData2 == NULL) {
3140 					return 1;
3141 				}
3142 			}
3143 		}
3144 
3145 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
3146 			pData1 = Z_INDIRECT_P(pData1);
3147 		}
3148 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
3149 			pData2 = Z_INDIRECT_P(pData2);
3150 		}
3151 
3152 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
3153 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
3154 				return -1;
3155 			}
3156 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
3157 			return 1;
3158 		} else {
3159 			result = compar(pData1, pData2);
3160 			if (result != 0) {
3161 				return result;
3162 			}
3163 		}
3164 	}
3165 
3166 	return 0;
3167 }
3168 
zend_hash_compare(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3169 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
3170 {
3171 	int result;
3172 	IS_CONSISTENT(ht1);
3173 	IS_CONSISTENT(ht2);
3174 
3175 	if (ht1 == ht2) {
3176 		return 0;
3177 	}
3178 
3179 	/* It's enough to protect only one of the arrays.
3180 	 * The second one may be referenced from the first and this may cause
3181 	 * false recursion detection.
3182 	 */
3183 	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
3184 		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
3185 	}
3186 
3187 	GC_TRY_PROTECT_RECURSION(ht1);
3188 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
3189 	GC_TRY_UNPROTECT_RECURSION(ht1);
3190 
3191 	return result;
3192 }
3193 
3194 
zend_hash_minmax(const HashTable * ht,compare_func_t compar,uint32_t flag)3195 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
3196 {
3197 	uint32_t idx;
3198 	zval *res;
3199 
3200 	IS_CONSISTENT(ht);
3201 
3202 	if (ht->nNumOfElements == 0 ) {
3203 		return NULL;
3204 	}
3205 
3206 	if (HT_IS_PACKED(ht)) {
3207 		zval *zv;
3208 
3209 		idx = 0;
3210 		while (1) {
3211 			if (idx == ht->nNumUsed) {
3212 				return NULL;
3213 			}
3214 			if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) break;
3215 			idx++;
3216 		}
3217 		res = ht->arPacked + idx;
3218 		for (; idx < ht->nNumUsed; idx++) {
3219 			zv = ht->arPacked + idx;
3220 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
3221 
3222 			if (flag) {
3223 				if (compar(res, zv) < 0) { /* max */
3224 					res = zv;
3225 				}
3226 			} else {
3227 				if (compar(res, zv) > 0) { /* min */
3228 					res = zv;
3229 				}
3230 			}
3231 		}
3232 	} else {
3233 		Bucket *p;
3234 
3235 		idx = 0;
3236 		while (1) {
3237 			if (idx == ht->nNumUsed) {
3238 				return NULL;
3239 			}
3240 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
3241 			idx++;
3242 		}
3243 		res = &ht->arData[idx].val;
3244 		for (; idx < ht->nNumUsed; idx++) {
3245 			p = ht->arData + idx;
3246 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
3247 
3248 			if (flag) {
3249 				if (compar(res, &p->val) < 0) { /* max */
3250 					res = &p->val;
3251 				}
3252 			} else {
3253 				if (compar(res, &p->val) > 0) { /* min */
3254 					res = &p->val;
3255 				}
3256 			}
3257 		}
3258 	}
3259 	return res;
3260 }
3261 
/* Parse `key` (of `length` bytes) as a canonical decimal integer array index
 * into `*idx`. Returns 1 on success, 0 when the string is not a valid index
 * (leading zeros, too long, overflow, or non-digit characters).
 *
 * NOTE(review): assumes the first character after an optional '-' is a digit
 * — the inline wrapper in zend_hash.h appears to guarantee this; confirm
 * before calling directly. */
_zend_handle_numeric_str_ex(const char * key,size_t length,zend_ulong * idx)3262 ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
3263 {
3264 	const char *tmp = key;
3265 
3266 	const char *end = key + length;
3267 
3268 	if (*tmp == '-') {
3269 		tmp++;
3270 	}
3271 
	/* Reject non-canonical forms up front; the digit-count checks make the
	 * accumulation below safe from zend_ulong wrap-around. */
3272 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
3273 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
3274 	 || (SIZEOF_ZEND_LONG == 4 &&
3275 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
3276 	     *tmp > '2')) { /* overflow */
3277 		return 0;
3278 	}
3279 	*idx = (*tmp - '0');
3280 	while (1) {
3281 		++tmp;
3282 		if (tmp == end) {
3283 			if (*key == '-') {
				/* Negative range is one wider than positive: magnitudes up
				 * to ZEND_LONG_MAX + 1 (= -ZEND_LONG_MIN) are representable,
				 * hence the `*idx - 1` comparison. */
3284 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
3285 					return 0;
3286 				}
3287 				*idx = 0 - *idx;
3288 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
3289 				return 0;
3290 			}
3291 			return 1;
3292 		}
3293 		if (*tmp <= '9' && *tmp >= '0') {
3294 			*idx = (*idx * 10) + (*tmp - '0');
3295 		} else {
3296 			return 0;
3297 		}
3298 	}
3299 }
3300 
3301 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
3302  * and converts it to a "proptable" (contains only string keys).
3303  * If the symtable didn't need duplicating, its refcount is incremented.
3304  */
/* Convert a symtable (integer and string keys) into a proptable (string keys
 * only). If the table already has only string keys, it is returned as-is
 * with an added reference (unless immutable); otherwise a new table is built
 * with all integer keys stringified. */
zend_symtable_to_proptable(HashTable * ht)3305 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
3306 {
3307 	zend_ulong num_key;
3308 	zend_string *str_key;
3309 	zval *zv;
3310 
	/* Packed tables have only integer keys, so conversion is unavoidable. */
3311 	if (UNEXPECTED(HT_IS_PACKED(ht))) {
3312 		goto convert;
3313 	}
3314 
	/* Any integer-keyed entry forces a full conversion. */
3315 	ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3316 		if (!str_key) {
3317 			goto convert;
3318 		}
3319 	} ZEND_HASH_FOREACH_END();
3320 
	/* All keys are strings already: share the table (immutable tables are
	 * not refcounted). */
3321 	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
3322 		GC_ADDREF(ht);
3323 	}
3324 
3325 	return ht;
3326 
3327 convert:
3328 	{
3329 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3330 
3331 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
3332 			if (!str_key) {
				/* Stringify the integer key; delref so ownership passes to
				 * new_ht once zend_hash_update addrefs it. */
3333 				str_key = zend_long_to_str(num_key);
3334 				zend_string_delref(str_key);
3335 			}
			/* Addref the value; a singly-referenced reference wrapper is
			 * unwrapped to its inner value first. */
3336 			do {
3337 				if (Z_OPT_REFCOUNTED_P(zv)) {
3338 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3339 						zv = Z_REFVAL_P(zv);
3340 						if (!Z_OPT_REFCOUNTED_P(zv)) {
3341 							break;
3342 						}
3343 					}
3344 					Z_ADDREF_P(zv);
3345 				}
3346 			} while (0);
3347 			zend_hash_update(new_ht, str_key, zv);
3348 		} ZEND_HASH_FOREACH_END();
3349 
3350 		return new_ht;
3351 	}
3352 }
3353 
3354 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
3355  * a "symtable" (contains integer and non-numeric string keys).
3356  * If the proptable didn't need duplicating, its refcount is incremented.
3357  */
/* Convert a proptable into a symtable: string keys that look numeric become
 * integer keys. When no key needs conversion the original table is reused
 * (duplicated only if `always_duplicate`); otherwise a new table is built. */
zend_proptable_to_symtable(HashTable * ht,bool always_duplicate)3358 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
3359 {
3360 	zend_ulong num_key;
3361 	zend_string *str_key;
3362 	zval *zv;
3363 
	/* Packed tables already use integer keys only — nothing to normalize. */
3364 	if (!HT_IS_PACKED(ht)) {
3365 		ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
3366 			/* The `str_key &&` here might seem redundant: property tables should
3367 			 * only have string keys. Unfortunately, this isn't true, at the very
3368 			 * least because of ArrayObject, which stores a symtable where the
3369 			 * property table should be.
3370 			 */
3371 			if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3372 				goto convert;
3373 			}
3374 		} ZEND_HASH_FOREACH_END();
3375 	}
3376 
3377 	if (always_duplicate) {
3378 		return zend_array_dup(ht);
3379 	}
3380 
	/* Share the table; immutable tables are not refcounted. */
3381 	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
3382 		GC_ADDREF(ht);
3383 	}
3384 
3385 	return ht;
3386 
3387 convert:
3388 	{
3389 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
3390 
3391 		ZEND_HASH_MAP_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
			/* Addref the value; a singly-referenced reference wrapper is
			 * unwrapped to its inner value first. */
3392 			do {
3393 				if (Z_OPT_REFCOUNTED_P(zv)) {
3394 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
3395 						zv = Z_REFVAL_P(zv);
3396 						if (!Z_OPT_REFCOUNTED_P(zv)) {
3397 							break;
3398 						}
3399 					}
3400 					Z_ADDREF_P(zv);
3401 				}
3402 			} while (0);
3403 			/* Again, thank ArrayObject for `!str_key ||`. */
3404 			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
3405 				zend_hash_index_update(new_ht, num_key, zv);
3406 			} else {
3407 				zend_hash_update(new_ht, str_key, zv);
3408 			}
3409 		} ZEND_HASH_FOREACH_END();
3410 
3411 		return new_ht;
3412 	}
3413 }
3414