xref: /php-src/Zend/zend_hash.c (revision fdd6ba62)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #if defined(__aarch64__) || defined(_M_ARM64)
26 # include <arm_neon.h>
27 #endif
28 
29 /* Prefer to use AVX2 instructions for better latency and throughput */
30 #if defined(__AVX2__)
31 # include <immintrin.h>
32 #elif defined( __SSE2__)
33 # include <mmintrin.h>
34 # include <emmintrin.h>
35 #endif
36 
#if ZEND_DEBUG
/* In debug builds: assert `expr`, unless this table explicitly allows
 * copy-on-write violations (HASH_FLAG_ALLOW_COW_VIOLATION). */
# define HT_ASSERT(ht, expr) \
	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
#else
# define HT_ASSERT(ht, expr)
#endif

/* Assert that the table is not shared (refcount == 1) before mutating it. */
#define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)

/* Sentinel stored into iterators whose hashtable has gone away. */
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
47 
#if ZEND_DEBUG

/* Consistency states kept in HT_FLAGS under HASH_FLAG_CONSISTENCY (debug only). */
#define HT_OK					0x00
#define HT_IS_DESTROYING		0x01
#define HT_DESTROYED			0x02
#define HT_CLEANING				0x03

/* Abort with a diagnostic when the table is in any state other than HT_OK.
 * Invoked through the IS_CONSISTENT() macro, which supplies the call site. */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
		return;
	}
	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
		case HT_IS_DESTROYING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
			break;
		case HT_DESTROYED:
			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
			break;
		case HT_CLEANING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
			break;
		default:
			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
			break;
	}
	ZEND_UNREACHABLE();
}
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
#define SET_INCONSISTENT(n) do { \
		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
	} while (0)
#else
/* Release builds: consistency checks compile away completely. */
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif
84 
/* Grow the table when every slot (live or tombstoned) has been consumed. */
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
		zend_hash_do_resize(ht);					\
	}
89 
zend_hash_str_find_ptr_lc(const HashTable * ht,const char * str,size_t len)90 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
91 	void *result;
92 	char *lc_str;
93 
94 	/* Stack allocate small strings to improve performance */
95 	ALLOCA_FLAG(use_heap)
96 
97 	lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
98 	result = zend_hash_str_find_ptr(ht, lc_str, len);
99 	free_alloca(lc_str, use_heap);
100 
101 	return result;
102 }
103 
zend_hash_find_ptr_lc(const HashTable * ht,zend_string * key)104 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
105 	void *result;
106 	zend_string *lc_key = zend_string_tolower(key);
107 	result = zend_hash_find_ptr(ht, lc_key);
108 	zend_string_release(lc_key);
109 	return result;
110 }
111 
112 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
113 
/* Round nSize up to the power of two required by the table layout,
 * clamped to [HT_MIN_SIZE, HT_MAX_SIZE]; fatal error past the maximum. */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#ifdef ZEND_WIN32
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize > HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#ifdef ZEND_WIN32
	if (BitScanReverse(&index, nSize - 1)) {
		return 0x2u << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bit scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: classic round-up-to-power-of-two bit smear. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
148 
zend_hash_real_init_packed_ex(HashTable * ht)149 static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
150 {
151 	void *data;
152 
153 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
154 		data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
155 	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
156 		/* Use specialized API with constant allocation amount for a particularly common case. */
157 		data = emalloc(HT_PACKED_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
158 	} else {
159 		data = emalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
160 	}
161 	HT_SET_DATA_ADDR(ht, data);
162 	/* Don't overwrite iterator count. */
163 	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
164 	HT_HASH_RESET_PACKED(ht);
165 }
166 
/* Allocate and initialize storage for a mixed (bucket) table. The common
 * HT_MIN_SIZE case is fully specialized: constant-size allocation and the
 * 16 uint32 hash slots filled with -1 (invalid index) via SIMD stores
 * where available. */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
	void *data;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
		HT_SET_DATA_ADDR(ht, data);
		/* Don't overwrite iterator count. */
		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
#if defined(__AVX2__)
		/* Two 256-bit all-ones stores cover the 16 uint32 hash slots. */
		do {
			__m256i ymm0 = _mm256_setzero_si256();
			ymm0 = _mm256_cmpeq_epi64(ymm0, ymm0); /* all bits set */
			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data,  0), ymm0);
			_mm256_storeu_si256((__m256i*)&HT_HASH_EX(data,  8), ymm0);
		} while(0);
#elif defined (__SSE2__)
		/* Four 128-bit all-ones stores cover the 16 uint32 hash slots. */
		do {
			__m128i xmm0 = _mm_setzero_si128();
			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0); /* all bits set */
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  0), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  4), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  8), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
		} while (0);
#elif defined(__aarch64__) || defined(_M_ARM64)
		/* Four 128-bit NEON stores of -1 cover the 16 uint32 hash slots. */
		do {
			int32x4_t t = vdupq_n_s32(-1);
			vst1q_s32((int32_t*)&HT_HASH_EX(data,  0), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data,  4), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data,  8), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
		} while (0);
#else
		/* Scalar fallback: reset each of the 16 hash slots individually. */
		HT_HASH_EX(data,  0) = -1;
		HT_HASH_EX(data,  1) = -1;
		HT_HASH_EX(data,  2) = -1;
		HT_HASH_EX(data,  3) = -1;
		HT_HASH_EX(data,  4) = -1;
		HT_HASH_EX(data,  5) = -1;
		HT_HASH_EX(data,  6) = -1;
		HT_HASH_EX(data,  7) = -1;
		HT_HASH_EX(data,  8) = -1;
		HT_HASH_EX(data,  9) = -1;
		HT_HASH_EX(data, 10) = -1;
		HT_HASH_EX(data, 11) = -1;
		HT_HASH_EX(data, 12) = -1;
		HT_HASH_EX(data, 13) = -1;
		HT_HASH_EX(data, 14) = -1;
		HT_HASH_EX(data, 15) = -1;
#endif
		return;
	} else {
		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
	}
	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
	HT_SET_DATA_ADDR(ht, data);
	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET(ht);
}
233 
zend_hash_real_init_ex(HashTable * ht,bool packed)234 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
235 {
236 	HT_ASSERT_RC1(ht);
237 	ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
238 	if (packed) {
239 		zend_hash_real_init_packed_ex(ht);
240 	} else {
241 		zend_hash_real_init_mixed_ex(ht);
242 	}
243 }
244 
/* Shared read-only hash area for uninitialized tables: -HT_MIN_MASK slots,
 * all holding HT_INVALID_IDX. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};
247 
/* Immutable shared empty array: refcount 2 and GC_IMMUTABLE keep it alive,
 * and its data pointer references the shared uninitialized_bucket area. */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_UNINITIALIZED,
	.nTableMask = HT_MIN_MASK,
	{.arData = (Bucket*)&uninitialized_bucket[2]},
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = ZEND_LONG_MIN, /* sentinel: no element appended yet */
	.pDestructor = ZVAL_PTR_DTOR
};
261 
/* Common initializer: sets refcount, GC info and all scalar fields, but
 * defers real storage allocation — the table starts UNINITIALIZED,
 * pointing at the shared uninitialized_bucket area. */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->pDestructor = pDestructor;
	/* Round the requested size up to the required power of two. */
	ht->nTableSize = zend_hash_check_size(nSize);
}
276 
/* Public out-of-line wrapper around _zend_hash_init_int(). */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
281 
/* Allocate a fresh non-persistent array with the minimum table size. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
	return ht;
}
288 
/* Allocate a fresh non-persistent array pre-sized for nSize elements. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
	return ht;
}
295 
zend_new_pair(const zval * val1,const zval * val2)296 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(const zval *val1, const zval *val2)
297 {
298 	zval *zv;
299 	HashTable *ht = emalloc(sizeof(HashTable));
300 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
301 	ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
302 	zend_hash_real_init_packed_ex(ht);
303 
304 	zv = ht->arPacked;
305 	ZVAL_COPY_VALUE(zv, val1);
306 	zv++;
307 	ZVAL_COPY_VALUE(zv, val2);
308 	return ht;
309 }
310 
/* Double a packed table's capacity, failing hard when the doubled size
 * would exceed HT_MAX_SIZE. Existing slots survive via perealloc2(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
	HT_ASSERT_RC1(ht);
	if (ht->nTableSize >= HT_MAX_SIZE) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
	}
	uint32_t newTableSize = ht->nTableSize * 2;
	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
	ht->nTableSize = newTableSize;
}
321 
/* Public entry point for the one-time storage allocation (packed or mixed). */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
329 
/* Public entry point: allocate storage directly in the packed layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
337 
/* Public entry point: allocate storage directly in the mixed (bucket) layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
345 
/* Convert a packed array into the bucket (mixed) layout: every zval
 * becomes a bucket whose integer key is its former position, then the
 * collision chains are rebuilt by zend_hash_rehash(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	zval *src = ht->arPacked;
	Bucket *dst;
	uint32_t i;
	uint32_t nSize = ht->nTableSize;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	HT_ASSERT_RC1(ht);
	// Alloc before assign to avoid inconsistencies on OOM
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	dst = ht->arData;
	for (i = 0; i < ht->nNumUsed; i++) {
		ZVAL_COPY_VALUE(&dst->val, src);
		dst->h = i;      /* integer key == former packed index */
		dst->key = NULL; /* no string key */
		dst++;
		src++;
	}
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	zend_hash_rehash(ht);
}
373 
zend_hash_to_packed(HashTable * ht)374 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
375 {
376 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
377 	Bucket *src = ht->arData;
378 	zval *dst;
379 	uint32_t i;
380 
381 	HT_ASSERT_RC1(ht);
382 	new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
383 	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
384 	ht->nTableMask = HT_MIN_MASK;
385 	HT_SET_DATA_ADDR(ht, new_data);
386 	HT_HASH_RESET_PACKED(ht);
387 	dst = ht->arPacked;
388 	for (i = 0; i < ht->nNumUsed; i++) {
389 		ZVAL_COPY_VALUE(dst, &src->val);
390 		dst++;
391 		src++;
392 	}
393 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
394 }
395 
/* Pre-size the table for at least nSize elements while keeping its
 * current layout (packed vs. mixed). No-op when nSize is 0 or does not
 * exceed the current capacity. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
{
	HT_ASSERT_RC1(ht);

	if (nSize == 0) return;

	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* Not yet allocated: bump the size and allocate in one step. */
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			ZEND_ASSERT(HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Packed slots are position-addressed, so a plain realloc suffices. */
				uint32_t newTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_PACKED_SIZE_EX(newTableSize, HT_MIN_MASK), HT_PACKED_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
				ht->nTableSize = newTableSize;
			}
		} else {
			ZEND_ASSERT(!HT_IS_PACKED(ht));
			if (nSize > ht->nTableSize) {
				/* Mixed layout: copy the buckets into the larger area and
				 * rebuild the collision chains for the new mask. */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableSize = nSize;
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
434 
/* Roll the table back to its first nNumUsed slots, unlinking each dropped
 * bucket from its collision chain (no destructors are invoked here).
 * Walking from the tail works because collision pointers always lead from
 * higher to lower buckets, so each dropped bucket is its chain's head. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	while (p != end) {
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
#if 0
		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
			abort();
		}
#endif
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
459 
zend_array_recalc_elements(const HashTable * ht)460 static uint32_t zend_array_recalc_elements(const HashTable *ht)
461 {
462 	zval *val;
463 	uint32_t num = ht->nNumOfElements;
464 
465 	ZEND_HASH_MAP_FOREACH_VAL(ht, val) {
466 		if (Z_TYPE_P(val) == IS_INDIRECT) {
467 			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
468 				num--;
469 			}
470 		}
471 	} ZEND_HASH_FOREACH_END();
472 	return num;
473 }
474 /* }}} */
475 
zend_array_count(HashTable * ht)476 ZEND_API uint32_t zend_array_count(HashTable *ht)
477 {
478 	uint32_t num;
479 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
480 		num = zend_array_recalc_elements(ht);
481 		if (UNEXPECTED(ht->nNumOfElements == num)) {
482 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
483 		}
484 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
485 		num = zend_array_recalc_elements(ht);
486 	} else {
487 		num = zend_hash_num_elements(ht);
488 	}
489 	return num;
490 }
491 /* }}} */
492 
_zend_hash_get_valid_pos(const HashTable * ht,HashPosition pos)493 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
494 {
495 	if (HT_IS_PACKED(ht)) {
496 		while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arPacked[pos])) {
497 			pos++;
498 		}
499 	} else {
500 		while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
501 			pos++;
502 		}
503 	}
504 	return pos;
505 }
506 
/* Internal pointer of the table, advanced to the first live slot. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}

/* Public out-of-line wrapper for _zend_hash_get_current_pos(). */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}

/* Validate an arbitrary position: skip forward over UNDEF slots. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos_ex(const HashTable *ht, HashPosition pos)
{
	return _zend_hash_get_valid_pos(ht, pos);
}
521 
/* Delete every other iterator in the copy ring of iterator idx and make
 * idx a singleton ring again. Each copy's next_copy is first pointed at
 * itself so zend_hash_iterator_del() cannot recurse back into this walk. */
static void zend_hash_remove_iterator_copies(uint32_t idx) {
	HashTableIterator *iterators = EG(ht_iterators);

	HashTableIterator *iter = iterators + idx;
	uint32_t next_idx = iter->next_copy;
	while (next_idx != idx) {
		uint32_t cur_idx = next_idx;
		HashTableIterator *cur_iter = iterators + cur_idx;
		next_idx = cur_iter->next_copy;
		cur_iter->next_copy = cur_idx; // avoid recursion in zend_hash_iterator_del
		zend_hash_iterator_del(cur_idx);
	}
	iter->next_copy = idx;
}
536 
/* Register an external iterator for ht at position pos and return its
 * slot index. A free slot in EG(ht_iterators) is reused when available;
 * otherwise the array grows by 8 slots (migrating off the embedded
 * EG(ht_iterators_slots) buffer on first growth). */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end  = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* A saturated per-table iterator counter stays saturated. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	while (iter != end) {
		if (iter->ht == NULL) {
			/* Free slot found: claim it. */
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			iter->next_copy = idx; /* singleton copy ring */
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* No free slot: grow the iterator array by 8 entries. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7); /* mark the remaining new slots free */
	idx = iter - EG(ht_iterators);
	iter->next_copy = idx;
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
575 
// To avoid losing track of the HashTable when separating arrays, we track all copies at once.
/* Scan iterator idx's copy ring for an entry attached to ht. On a hit,
 * that entry's (ht, pos) replaces the original's, iterator counters are
 * rebalanced, the copies are deleted, and true is returned. On a miss
 * the copies are deleted and false is returned. */
static zend_always_inline bool zend_hash_iterator_find_copy_pos(uint32_t idx, HashTable *ht) {
	HashTableIterator *iter = EG(ht_iterators) + idx;

	uint32_t next_idx = iter->next_copy;
	if (EXPECTED(next_idx != idx)) {
		HashTableIterator *copy_iter;
		while (next_idx != idx) {
			copy_iter = EG(ht_iterators) + next_idx;
			if (copy_iter->ht == ht) {
				// We have found the hashtable we are actually iterating over
				// Now clean any intermittent copies and replace the original index by the found one
				if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
					&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
					HT_DEC_ITERATORS_COUNT(iter->ht);
				}
				if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
					HT_INC_ITERATORS_COUNT(ht);
				}
				iter->ht = copy_iter->ht;
				iter->pos = copy_iter->pos;
				zend_hash_remove_iterator_copies(idx);
				return true;
			}
			next_idx = copy_iter->next_copy;
		}
		zend_hash_remove_iterator_copies(idx);
	}

	return false;
}
607 
/* Current position of iterator idx on ht. When the iterator points at a
 * different table and none of its copies matches either, it is
 * re-attached to ht at the table's current internal position, with the
 * per-table iterator counters fixed up. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
		/* Detach from the old table (if valid, not poisoned, and its
		 * counter has not saturated), then attach to ht. */
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
626 
zend_hash_iterator_pos_ex(uint32_t idx,zval * array)627 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
628 {
629 	HashTable *ht = Z_ARRVAL_P(array);
630 	HashTableIterator *iter = EG(ht_iterators) + idx;
631 
632 	ZEND_ASSERT(idx != (uint32_t)-1);
633 	if (UNEXPECTED(iter->ht != ht) && !zend_hash_iterator_find_copy_pos(idx, ht)) {
634 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
635 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
636 			HT_DEC_ITERATORS_COUNT(iter->ht);
637 		}
638 		SEPARATE_ARRAY(array);
639 		ht = Z_ARRVAL_P(array);
640 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
641 			HT_INC_ITERATORS_COUNT(ht);
642 		}
643 		iter->ht = ht;
644 		iter->pos = _zend_hash_get_current_pos(ht);
645 	}
646 	return iter->pos;
647 }
648 
/* Release iterator slot idx: decrement its table's iterator count (when
 * valid, not poisoned, and not saturated), free the slot, delete any
 * remaining copies, and trim EG(ht_iterators_used) when the tail frees up. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);

	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;

	if (UNEXPECTED(iter->next_copy != idx)) {
		zend_hash_remove_iterator_copies(idx);
	}

	/* Shrink the used range past trailing free slots to keep scans short. */
	if (idx == EG(ht_iterators_used) - 1) {
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
673 
_zend_hash_iterators_remove(const HashTable * ht)674 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(const HashTable *ht)
675 {
676 	HashTableIterator *iter = EG(ht_iterators);
677 	const HashTableIterator *end = iter + EG(ht_iterators_used);
678 
679 	while (iter != end) {
680 		if (iter->ht == ht) {
681 			iter->ht = HT_POISONED_PTR;
682 		}
683 		iter++;
684 	}
685 }
686 
/* Fast-path wrapper: only scan the iterator array when ht has iterators. */
static zend_always_inline void zend_hash_iterators_remove(const HashTable *ht)
{
	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		_zend_hash_iterators_remove(ht);
	}
}
693 
zend_hash_iterators_lower_pos(const HashTable * ht,HashPosition start)694 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(const HashTable *ht, HashPosition start)
695 {
696 	const HashTableIterator *iter = EG(ht_iterators);
697 	const HashTableIterator *end = iter + EG(ht_iterators_used);
698 	HashPosition res = ht->nNumUsed;
699 
700 	while (iter != end) {
701 		if (iter->ht == ht) {
702 			if (iter->pos >= start && iter->pos < res) {
703 				res = iter->pos;
704 			}
705 		}
706 		iter++;
707 	}
708 	return res;
709 }
710 
_zend_hash_iterators_update(const HashTable * ht,HashPosition from,HashPosition to)711 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(const HashTable *ht, HashPosition from, HashPosition to)
712 {
713 	HashTableIterator *iter = EG(ht_iterators);
714 	const HashTableIterator *end = iter + EG(ht_iterators_used);
715 
716 	while (iter != end) {
717 		if (iter->ht == ht && iter->pos == from) {
718 			iter->pos = to;
719 		}
720 		iter++;
721 	}
722 }
723 
zend_hash_iterators_advance(const HashTable * ht,HashPosition step)724 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(const HashTable *ht, HashPosition step)
725 {
726 	HashTableIterator *iter = EG(ht_iterators);
727 	const HashTableIterator *end = iter + EG(ht_iterators_used);
728 
729 	while (iter != end) {
730 		if (iter->ht == ht) {
731 			iter->pos += step;
732 		}
733 		iter++;
734 	}
735 }
736 
/* Hash must be known and precomputed before */
/* Look up a zend_string key in a mixed table. Each probe first checks
 * pointer identity (cheap hit for interned strings) and then falls back
 * to hash + content comparison. Returns NULL when the key is absent. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, const zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	ZEND_ASSERT(ZSTR_H(key) != 0 && "Hash must be known");

	arData = ht->arData;
	nIndex = ZSTR_H(key) | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);

	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}

	/* Walk the collision chain. */
	while (1) {
		if (p->h == ZSTR_H(key) &&
		    EXPECTED(p->key) &&
		    zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
774 
zend_hash_str_find_bucket(const HashTable * ht,const char * str,size_t len,zend_ulong h)775 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
776 {
777 	uint32_t nIndex;
778 	uint32_t idx;
779 	Bucket *p, *arData;
780 
781 	arData = ht->arData;
782 	nIndex = h | ht->nTableMask;
783 	idx = HT_HASH_EX(arData, nIndex);
784 	while (idx != HT_INVALID_IDX) {
785 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
786 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
787 		if ((p->h == h)
788 			 && p->key
789 			 && zend_string_equals_cstr(p->key, str, len)) {
790 			return p;
791 		}
792 		idx = Z_NEXT(p->val);
793 	}
794 	return NULL;
795 }
796 
zend_hash_index_find_bucket(const HashTable * ht,zend_ulong h)797 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
798 {
799 	uint32_t nIndex;
800 	uint32_t idx;
801 	Bucket *p, *arData;
802 
803 	arData = ht->arData;
804 	nIndex = h | ht->nTableMask;
805 	idx = HT_HASH_EX(arData, nIndex);
806 	while (idx != HT_INVALID_IDX) {
807 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
808 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
809 		if (p->h == h && !p->key) {
810 			return p;
811 		}
812 		idx = Z_NEXT(p->val);
813 	}
814 	return NULL;
815 }
816 
/* Core insert/update for zend_string keys. `flag` combines HASH_ADD,
 * HASH_ADD_NEW, HASH_UPDATE_INDIRECT and HASH_LOOKUP and selects the
 * behavior when the key already exists. Returns the value slot, or NULL
 * when an add is refused because a live entry exists. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_string_hash_val(key);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces the packed table into the mixed layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
		/* Debug builds probe even for HASH_ADD_NEW, to assert the key is new. */
		p = zend_hash_find_bucket(ht, key);

		if (p) {
			zval *data;

			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				ZEND_ASSERT(&p->val != pData);
				/* An INDIRECT slot whose target is UNDEF may be re-added. */
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	nIndex = h | ht->nTableMask;
	/* Prepend the new bucket to its collision chain. */
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
897 
/* Core insert/update for (str, len) keys with precomputed hash h: like
 * _zend_hash_add_or_update_i(), but creates the zend_string key itself
 * when a new bucket is added. */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* A string key forces the packed table into the mixed layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);

		if (p) {
			zval *data;

			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				ZEND_ASSERT(&p->val != pData);
				/* An INDIRECT slot whose target is UNDEF may be re-added. */
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	/* Materialize the key string; persistence follows the table's. */
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
#if ZEND_RC_DEBUG
	if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) {
		GC_MAKE_PERSISTENT_LOCAL(key);
	}
#endif
	p->h = ZSTR_H(key) = h;
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	/* Prepend the new bucket to its collision chain. */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);

	return &p->val;
}
977 
zend_hash_add_or_update(HashTable * ht,zend_string * key,zval * pData,uint32_t flag)978 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
979 {
980 	if (flag == HASH_ADD) {
981 		return zend_hash_add(ht, key, pData);
982 	} else if (flag == HASH_ADD_NEW) {
983 		return zend_hash_add_new(ht, key, pData);
984 	} else if (flag == HASH_UPDATE) {
985 		return zend_hash_update(ht, key, pData);
986 	} else {
987 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
988 		return zend_hash_update_ind(ht, key, pData);
989 	}
990 }
991 
/* Insert key => pData; returns NULL if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}
996 
/* Insert or overwrite key => pData; never fails to store. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}
1001 
/* Like zend_hash_update(), but if the existing slot is IS_INDIRECT the
 * referenced zval is updated instead of the slot itself. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
1006 
/* Insert key => pData when the caller guarantees the key is not present
 * (skips the duplicate lookup on the fast path). */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
1011 
/* Return the slot for key, inserting a NULL-valued element if absent. */
ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
{
	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
}
1016 
zend_hash_str_add_or_update(HashTable * ht,const char * str,size_t len,zval * pData,uint32_t flag)1017 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
1018 {
1019 	if (flag == HASH_ADD) {
1020 		return zend_hash_str_add(ht, str, len, pData);
1021 	} else if (flag == HASH_ADD_NEW) {
1022 		return zend_hash_str_add_new(ht, str, len, pData);
1023 	} else if (flag == HASH_UPDATE) {
1024 		return zend_hash_str_update(ht, str, len, pData);
1025 	} else {
1026 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
1027 		return zend_hash_str_update_ind(ht, str, len, pData);
1028 	}
1029 }
1030 
zend_hash_str_update(HashTable * ht,const char * str,size_t len,zval * pData)1031 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
1032 {
1033 	zend_ulong h = zend_hash_func(str, len);
1034 
1035 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
1036 }
1037 
zend_hash_str_update_ind(HashTable * ht,const char * str,size_t len,zval * pData)1038 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
1039 {
1040 	zend_ulong h = zend_hash_func(str, len);
1041 
1042 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
1043 }
1044 
zend_hash_str_add(HashTable * ht,const char * str,size_t len,zval * pData)1045 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
1046 {
1047 	zend_ulong h = zend_hash_func(str, len);
1048 
1049 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
1050 }
1051 
zend_hash_str_add_new(HashTable * ht,const char * str,size_t len,zval * pData)1052 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
1053 {
1054 	zend_ulong h = zend_hash_func(str, len);
1055 
1056 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
1057 }
1058 
zend_hash_index_add_empty_element(HashTable * ht,zend_ulong h)1059 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
1060 {
1061 	zval dummy;
1062 
1063 	ZVAL_NULL(&dummy);
1064 	return zend_hash_index_add(ht, h, &dummy);
1065 }
1066 
zend_hash_add_empty_element(HashTable * ht,zend_string * key)1067 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
1068 {
1069 	zval dummy;
1070 
1071 	ZVAL_NULL(&dummy);
1072 	return zend_hash_add(ht, key, &dummy);
1073 }
1074 
zend_hash_str_add_empty_element(HashTable * ht,const char * str,size_t len)1075 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
1076 {
1077 	zval dummy;
1078 
1079 	ZVAL_NULL(&dummy);
1080 	return zend_hash_str_add(ht, str, len, &dummy);
1081 }
1082 
/* Insert or update the element with integer key h, honoring flag
 * (HASH_ADD / HASH_UPDATE / HASH_ADD_NEW / HASH_ADD_NEXT / HASH_LOOKUP).
 * Handles all three table states: packed, uninitialized, and mixed.
 * Returns the slot holding the value, or NULL when HASH_ADD finds an
 * existing element. */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	zval *zv;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* nNextFreeElement starts at ZEND_LONG_MIN on an empty table; the
	 * first appended element gets index 0 */
	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
		h = 0;
	}

	if (HT_IS_PACKED(ht)) {
		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
		 && h < ht->nNumUsed) {
			zv = ht->arPacked + h;
			if (Z_TYPE_P(zv) != IS_UNDEF) {
				if (flag & HASH_LOOKUP) {
					return zv;
				}
replace:
				if (flag & HASH_ADD) {
					return NULL;
				}
				if (ht->pDestructor) {
					ht->pDestructor(zv);
				}
				ZVAL_COPY_VALUE(zv, pData);
				return zv;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			zv = ht->arPacked + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				if (h > ht->nNumUsed) {
					zval *q = ht->arPacked + ht->nNumUsed;
					while (q != zv) {
						ZVAL_UNDEF(q);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			ht->nNumOfElements++;
			if (flag & HASH_LOOKUP) {
				ZVAL_NULL(zv);
			} else {
				ZVAL_COPY_VALUE(zv, pData);
			}

			return zv;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* stay packed only while the index is close enough to the
			 * current size and the table is dense enough */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		if (h < ht->nTableSize) {
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		/* in debug builds also check HASH_ADD_NEW insertions so the
		 * "key must be new" contract is asserted below */
		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				if (flag & HASH_LOOKUP) {
					return &p->val;
				}
				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
				zv = &p->val;
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
	}

	/* append a new bucket in mixed mode */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	if ((zend_long)h >= ht->nNextFreeElement) {
		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL;	/* NULL key marks an integer-indexed bucket */
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}

	return &p->val;
}
1190 
zend_hash_index_add_or_update(HashTable * ht,zend_ulong h,zval * pData,uint32_t flag)1191 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1192 {
1193 	if (flag == HASH_ADD) {
1194 		return zend_hash_index_add(ht, h, pData);
1195 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1196 		return zend_hash_index_add_new(ht, h, pData);
1197 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1198 		ZEND_ASSERT(h == ht->nNextFreeElement);
1199 		return zend_hash_next_index_insert(ht, pData);
1200 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1201 		ZEND_ASSERT(h == ht->nNextFreeElement);
1202 		return zend_hash_next_index_insert_new(ht, pData);
1203 	} else {
1204 		ZEND_ASSERT(flag == HASH_UPDATE);
1205 		return zend_hash_index_update(ht, h, pData);
1206 	}
1207 }
1208 
/* Insert h => pData; returns NULL if the integer key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
1213 
/* Insert h => pData when the caller guarantees the key is not present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
1218 
/* Insert or overwrite h => pData; never fails to store. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
1223 
/* Append pData under the next free integer key. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
1228 
/* Append pData under the next free integer key, skipping the duplicate
 * check (the appended key is new by construction). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
1233 
/* Return the slot for integer key h, inserting a NULL element if absent. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
{
	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
}
1238 
/* Change the string key of an existing bucket b in place, keeping the
 * bucket at the same array position (element order is preserved).
 * Returns &b->val on success, NULL if the new key already belongs to a
 * different bucket, or &p->val if b already has this key. */
ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx, i;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!HT_IS_PACKED(ht));

	(void)zend_string_hash_val(key);
	p = zend_hash_find_bucket(ht, key);
	if (UNEXPECTED(p)) {
		/* new key is taken; only OK when it is b's own current key */
		return (p == b) ? &p->val : NULL;
	}

	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}

	arData = ht->arData;

	/* del from hash */
	idx = HT_IDX_TO_HASH(b - arData);
	nIndex = b->h | ht->nTableMask;
	i = HT_HASH_EX(arData, nIndex);
	if (i == idx) {
		/* b is the chain head: unlink it directly */
		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
	} else {
		/* walk the collision chain until the predecessor of b */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(p->val) = Z_NEXT(b->val);
	}
	zend_string_release(b->key);

	/* add to hash */
	idx = b - arData;
	b->key = key;
	b->h = ZSTR_H(key);
	nIndex = b->h | ht->nTableMask;
	idx = HT_IDX_TO_HASH(idx);
	i = HT_HASH_EX(arData, nIndex);
	if (i == HT_INVALID_IDX || i < idx) {
		Z_NEXT(b->val) = i;
		HT_HASH_EX(arData, nIndex) = idx;
	} else {
		/* insert by descending idx so the chain stays ordered the same
		 * way regular prepend-insertion would have produced */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(b->val) = Z_NEXT(p->val);
		Z_NEXT(p->val) = idx;
	}
	return &b->val;
}
1299 
/* Make room for further insertions in a full mixed-mode table: either
 * compact away deleted holes (cheap) or double the allocation and
 * rehash.  Fatals when the table already has the maximum size. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	ZEND_ASSERT(!HT_IS_PACKED(ht));
	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));

		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableSize = nSize;
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* only the bucket array is copied; the hash part is rebuilt below */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
1327 
/* Rebuild the hash part of a mixed-mode table from its bucket array,
 * compacting away IS_UNDEF holes (moving live buckets down) and keeping
 * the internal pointer and any active external iterators consistent. */
ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
			/* Even if the array is empty, we still need to reset the iterator positions. */
			ht->nInternalPointer = 0;
			if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
				HashTableIterator *iter = EG(ht_iterators);
				HashTableIterator *end  = iter + EG(ht_iterators_used);
				while (iter != end) {
					if (iter->ht == ht) {
						iter->pos = 0;
					}
					iter++;
				}
			}
		}
		return;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* fast path: every bucket is live, just relink the chains */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* first hole found: from here on, compact live buckets
				 * down to position j while relinking them */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* same compaction, but also remap iterator positions
					 * from old indices to compacted ones */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
}
1431 
zend_hash_iterators_clamp_max(const HashTable * ht,uint32_t max)1432 static zend_always_inline void zend_hash_iterators_clamp_max(const HashTable *ht, uint32_t max)
1433 {
1434 	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1435 		HashTableIterator *iter = EG(ht_iterators);
1436 		const HashTableIterator *end = iter + EG(ht_iterators_used);
1437 		while (iter != end) {
1438 			if (iter->ht == ht) {
1439 				iter->pos = MIN(iter->pos, max);
1440 			}
1441 			iter++;
1442 		}
1443 	}
1444 }
1445 
/* Delete the value zv at packed position idx (given in HT_IDX_TO_HASH
 * form).  Shrinks nNumUsed over any trailing UNDEF holes and clamps the
 * internal pointer and external iterators accordingly. */
static zend_always_inline void _zend_hash_packed_del_val(HashTable *ht, uint32_t idx, zval *zv)
{
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nNumUsed - 1 == idx) {
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arPacked[ht->nNumUsed-1]) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
		zend_hash_iterators_clamp_max(ht, ht->nNumUsed);
	}
	if (ht->pDestructor) {
		/* mark the slot UNDEF before running the destructor so re-entrant
		 * access to the table never sees a half-deleted element */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, zv);
		ZVAL_UNDEF(zv);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(zv);
	}
}
1466 
/* Delete bucket p (hash-index idx) from a mixed-mode table.  prev is
 * p's predecessor in the collision chain, or NULL if p is the chain
 * head.  Shrinks nNumUsed over trailing holes and clamps iterators. */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	/* unlink p from its collision chain */
	if (prev) {
		Z_NEXT(prev->val) = Z_NEXT(p->val);
	} else {
		HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nNumUsed - 1 == idx) {
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
		zend_hash_iterators_clamp_max(ht, ht->nNumUsed);
	}
	if (ht->pDestructor) {
		/* UNDEF the slot before destruction to stay re-entrancy safe */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
1492 
/* Delete bucket p (hash-index idx) when its chain predecessor is not
 * known: walk the collision chain to find it, release the key, then
 * delegate to _zend_hash_del_el_ex(). */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;
	uint32_t nIndex;
	uint32_t i;

	nIndex = p->h | ht->nTableMask;
	i = HT_HASH(ht, nIndex);

	if (i != idx) {
		/* p is not the chain head: locate its predecessor */
		prev = HT_HASH_TO_BUCKET(ht, i);
		while (Z_NEXT(prev->val) != idx) {
			i = Z_NEXT(prev->val);
			prev = HT_HASH_TO_BUCKET(ht, i);
		}
	}

	if (p->key) {
		zend_string_release(p->key);
		p->key = NULL;
	}
	_zend_hash_del_el_ex(ht, idx, p, prev);
}
1516 
/* Delete the given value slot from a packed table (zv must point into
 * ht->arPacked). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_del_val(HashTable *ht, zval *zv)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(HT_IS_PACKED(ht));
	_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(zv - ht->arPacked), zv);
}
1524 
1525 
/* Delete the given bucket from a mixed-mode table (p must point into
 * ht->arData). */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!HT_IS_PACKED(ht));
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
1533 
/* Delete the element with string key.  Returns SUCCESS if it was found
 * and removed, FAILURE otherwise. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	/* walk the collision chain, remembering the predecessor so the
	 * bucket can be unlinked without a second pass */
	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			zend_string_release(p->key);
			p->key = NULL;
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1565 
/* Delete the element with string key, treating IS_INDIRECT slots
 * specially: the referenced zval is undef'ed in place (the slot itself
 * stays, flagged via HASH_FLAG_HAS_EMPTY_IND) instead of removing the
 * bucket.  Returns SUCCESS on deletion, FAILURE if absent or already
 * empty-indirect. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* UNDEF before destroying: re-entrancy safety */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				zend_string_release(p->key);
				p->key = NULL;
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1615 
/* C-string counterpart of zend_hash_del_ind(): IS_INDIRECT targets are
 * undef'ed in place, other elements are removed.  Returns SUCCESS on
 * deletion, FAILURE if absent or already empty-indirect. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h)
			 && p->key
			 && zend_string_equals_cstr(p->key, str, len)) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						ht->pDestructor(data);
					}
					ZVAL_UNDEF(data);
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				zend_string_release(p->key);
				p->key = NULL;
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1660 
/* Delete the element with C-string key (str, len).  Returns SUCCESS if
 * it was found and removed, FAILURE otherwise. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h)
			 && p->key
			 && zend_string_equals_cstr(p->key, str, len)) {
			zend_string_release(p->key);
			p->key = NULL;
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1691 
/* Delete the element with integer key h, handling both packed and
 * mixed representations.  Returns SUCCESS if found, FAILURE otherwise. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		/* packed: the key is the array position */
		if (h < ht->nNumUsed) {
			zval *zv = ht->arPacked + h;
			if (Z_TYPE_P(zv) != IS_UNDEF) {
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(h), zv);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* NULL key identifies an integer-indexed bucket */
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1726 
/* Destroy all elements of ht (running the destructor and releasing
 * string keys) and free its data block.  The HashTable struct itself is
 * NOT freed.  SET_INCONSISTENT brackets the destructor loops so debug
 * builds catch re-entrant access during destruction. */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			if (ht->pDestructor) {
				zval *zv = ht->arPacked;
				zval *end = zv + ht->nNumUsed;

				SET_INCONSISTENT(HT_IS_DESTROYING);
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(zv);
					} while (++zv != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
							ht->pDestructor(zv);
						}
					} while (++zv != end);
				}
				SET_INCONSISTENT(HT_DESTROYED);
			}
			zend_hash_iterators_remove(ht);
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (ht->pDestructor) {
				SET_INCONSISTENT(HT_IS_DESTROYING);

				/* four specialized loops: {static keys only, mixed keys}
				 * x {no holes, holes} to keep the common cases tight */
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(&p->val);
						} while (++p != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
								ht->pDestructor(&p->val);
							}
						} while (++p != end);
					}
				} else if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}

				SET_INCONSISTENT(HT_DESTROYED);
			} else {
				/* no destructor: only string keys need releasing */
				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				}
			}
			zend_hash_iterators_remove(ht);
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* uninitialized tables share a static data block: nothing to free */
		return;
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
1807 
/* Destroy a regular (zval-holding) PHP array and free the HashTable
 * struct itself.  Faster than zend_hash_destroy() because it inlines
 * i_zval_ptr_dtor() for the common ZVAL_PTR_DTOR destructor. */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_IS_PACKED(ht)) {
			zval *zv = ht->arPacked;
			zval *end = zv + ht->nNumUsed;

			do {
				i_zval_ptr_dtor(zv);
			} while (++zv != end);
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				do {
					i_zval_ptr_dtor(&p->val);
				} while (++p != end);
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						/* keys of regular arrays are never persistent */
						zend_string_release_ex(p->key, 0);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						i_zval_ptr_dtor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release_ex(p->key, 0);
						}
					}
				} while (++p != end);
			}
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		goto free_ht;
	}
	SET_INCONSISTENT(HT_DESTROYED);
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	zend_hash_iterators_remove(ht);
	FREE_HASHTABLE(ht);
}
1868 
/* Remove all elements from ht (running destructors and releasing string
 * keys) but keep the allocated data block so the table can be refilled
 * without reallocation. */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		if (HT_IS_PACKED(ht)) {
			zval *zv = ht->arPacked;
			zval *end = zv + ht->nNumUsed;

			if (ht->pDestructor) {
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(zv);
						} while (++zv != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE_P(zv) != IS_UNDEF)) {
								ht->pDestructor(zv);
							}
						} while (++zv != end);
					}
				}
			}
		} else {
			Bucket *p = ht->arData;
			Bucket *end = p + ht->nNumUsed;

			if (ht->pDestructor) {
				/* specialized loops as in zend_hash_destroy() */
				if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
					if (HT_IS_WITHOUT_HOLES(ht)) {
						do {
							ht->pDestructor(&p->val);
						} while (++p != end);
					} else {
						do {
							if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
								ht->pDestructor(&p->val);
							}
						} while (++p != end);
					}
				} else if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			} else {
				if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				}
			}
			/* only mixed tables have a hash part to reset */
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
1945 
/* Like zend_hash_clean() but specialized for symbol tables: the table is
 * always non-packed and values are destroyed via i_zval_ptr_dtor() rather
 * than through ht->pDestructor. */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		ZEND_ASSERT(!HT_IS_PACKED(ht));
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			/* Static/interned keys: nothing to release besides values. */
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			/* Skip IS_UNDEF holes left by earlier deletions. */
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
1985 
/* Destroy the table by deleting elements one by one through the regular
 * deletion helpers, so the table's bookkeeping stays consistent after every
 * single deletion, then free the bucket storage. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_IS_PACKED(ht)) {
		zval *zv = ht->arPacked;

		for (idx = 0; idx < ht->nNumUsed; idx++, zv++) {
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData;

		for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}
	/* Uninitialized tables own no bucket storage of their own. */
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2014 
/* Same as zend_hash_graceful_destroy() but deletes elements in reverse
 * insertion-slot order (last used slot first). */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		/* Start one past the last used slot; pre-decrement before each use. */
		zval *zv = ht->arPacked + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			zv--;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
		}
	} else {
		Bucket *p = ht->arData + ht->nNumUsed;

		while (idx > 0) {
			idx--;
			p--;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
	}

	/* Uninitialized tables own no bucket storage of their own. */
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
2049 
/* This is used to iterate over the elements of a hashtable and selectively
 * delete certain entries. apply_func() receives the data and decides if the
 * entry should be deleted or if the iteration should be stopped. The return
 * value is a bit mask built from the following codes:
 * ZEND_HASH_APPLY_KEEP   - keep the element and continue
 * ZEND_HASH_APPLY_STOP   - stop the iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element; may be combined with
 *                          ZEND_HASH_APPLY_STOP
 */
2058 
/* Call apply_func() on every live value.  The returned bit mask may request
 * removal of the current element (ZEND_HASH_APPLY_REMOVE, which requires the
 * caller to hold the only reference) and/or stopping the iteration
 * (ZEND_HASH_APPLY_STOP). */
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			/* Skip holes left by earlier deletions. */
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2097 
2098 
/* Like zend_hash_apply(), but forwards one extra opaque argument to the
 * callback on every invocation. */
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);
	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;
			/* Skip holes left by earlier deletions. */
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			result = apply_func(zv, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			result = apply_func(&p->val, argument);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2135 
2136 
/* Like zend_hash_apply(), but forwards a variable argument list plus the
 * current element's key to the callback.  Note: not ZEND_FASTCALL because
 * of the variadic signature. */
ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	uint32_t idx;
	va_list args;
	zend_hash_key hash_key;
	int result;

	IS_CONSISTENT(ht);

	if (HT_IS_PACKED(ht)) {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			zval *zv = ht->arPacked + idx;

			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
			/* A va_list cannot be traversed twice, so re-initialize it for
			 * every callback invocation and va_end it afterwards. */
			va_start(args, num_args);
			/* Packed tables: numeric key == slot index, no string key. */
			hash_key.h = idx;
			hash_key.key = NULL;

			result = apply_func(zv, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	} else {
		for (idx = 0; idx < ht->nNumUsed; idx++) {
			Bucket *p = ht->arData + idx;

			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			va_start(args, num_args);
			hash_key.h = p->h;
			hash_key.key = p->key;

			result = apply_func(&p->val, num_args, args, &hash_key);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				va_end(args);
				break;
			}
			va_end(args);
		}
	}
}
2190 
2191 
/* Same as zend_hash_apply(), but visits elements in reverse slot order
 * (last used slot first). */
ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	int result;

	IS_CONSISTENT(ht);

	idx = ht->nNumUsed;
	if (HT_IS_PACKED(ht)) {
		zval *zv;

		while (idx > 0) {
			idx--;
			zv = ht->arPacked + idx;
			/* Skip holes left by earlier deletions. */
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;

			result = apply_func(zv);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_packed_del_val(ht, HT_IDX_TO_HASH(idx), zv);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	} else {
		Bucket *p;

		while (idx > 0) {
			idx--;
			p = ht->arData + idx;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;

			result = apply_func(&p->val);

			if (result & ZEND_HASH_APPLY_REMOVE) {
				HT_ASSERT_RC1(ht);
				_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
			}
			if (result & ZEND_HASH_APPLY_STOP) {
				break;
			}
		}
	}
}
2238 
2239 
/* Copy every element of source into target, overwriting entries whose key
 * already exists.  pCopyConstructor (may be NULL) is run on each value after
 * insertion.  The target must be exclusively owned by the caller. */
ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, const HashTable *source, copy_ctor_func_t pCopyConstructor)
{
	uint32_t idx;
	zval *new_entry, *data;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (HT_IS_PACKED(source)) {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			zval *zv = source->arPacked + idx;
			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;

			/* Packed tables: the slot index is the numeric key. */
			new_entry = zend_hash_index_update(target, idx, zv);
			if (pCopyConstructor) {
				pCopyConstructor(new_entry);
			}
		}
		return;
	}

	for (idx = 0; idx < source->nNumUsed; idx++) {
		Bucket *p = source->arData + idx;

		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;

		/* INDIRECT element may point to UNDEF-ined slots */
		data = &p->val;
		if (Z_TYPE_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
				continue;
			}
		}
		/* key == NULL means the bucket is keyed by the numeric hash p->h. */
		if (p->key) {
			new_entry = zend_hash_update(target, p->key, data);
		} else {
			new_entry = zend_hash_index_update(target, p->h, data);
		}
		if (pCopyConstructor) {
			pCopyConstructor(new_entry);
		}
	}
}
2285 
2286 
/* Copy one value from an array being duplicated into its destination slot.
 * Returns 0 (copying nothing) when the slot is a hole, i.e. IS_UNDEF either
 * directly or behind an IS_INDIRECT pointer.  Refcounted values get their
 * refcount bumped; a reference with refcount 1 is unwrapped to its inner
 * value, unless that inner value is the source array itself (which must stay
 * a reference to keep the self-reference intact). */
static zend_always_inline bool zend_array_dup_value(const HashTable *source, zval *data, zval *dest, bool packed, bool with_holes)
{
	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	/* do/while(0) only to allow "break" out of the addref path. */
	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				/* Unwrap a singly-held reference to its inner value. */
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break;
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(dest, data);

	return 1;
}
2323 
/* Duplicate one bucket from source into target at slot idx.  Returns 0 when
 * the source slot is a hole.  For non-packed tables this also copies the
 * numeric hash and key (adding a key reference unless all keys are static)
 * and links the new bucket into target's collision chain. */
static zend_always_inline bool zend_array_dup_element(const HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
{
	if (!zend_array_dup_value(source, &p->val, &q->val, packed, with_holes)) {
		return 0;
	}

	if (!packed) {
		uint32_t nIndex;

		q->h = p->h;
		q->key = p->key;
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}

		/* Prepend the bucket to its hash chain in the target. */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
2345 
2346 // We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back
// We need to duplicate iterators to be able to search through all copy-on-write copies to find the actually iterated HashTable and position back
static void zend_array_dup_ht_iterators(const HashTable *source, HashTable *target) {
	uint32_t iter_index = 0;
	uint32_t end_index = EG(ht_iterators_used);

	/* Every iterator currently attached to source gets a clone attached to
	 * target, linked into the source iterator's next_copy chain. */
	while (iter_index != end_index) {
		HashTableIterator *iter = &EG(ht_iterators)[iter_index];
		if (iter->ht == source) {
			uint32_t copy_idx = zend_hash_iterator_add(target, iter->pos);
			/* Refetch iter because the memory may be reallocated. */
			iter = &EG(ht_iterators)[iter_index];
			HashTableIterator *copy_iter = EG(ht_iterators) + copy_idx;
			copy_iter->next_copy = iter->next_copy;
			iter->next_copy = copy_idx;
		}
		iter_index++;
	}
}
2364 
/* Duplicate all slots of a packed source into a packed target.  Holes are
 * preserved as IS_UNDEF when with_holes is set.  Requires nNumUsed > 0
 * (the copy loop is do/while). */
static zend_always_inline void zend_array_dup_packed_elements(const HashTable *source, HashTable *target, bool with_holes)
{
	zval *p = source->arPacked;
	zval *q = target->arPacked;
	const zval *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_value(source, p, q, 1, with_holes)) {
			if (with_holes) {
				ZVAL_UNDEF(q);
			}
		}
		p++; q++;
	} while (p != end);

	/* Clone any iterators attached to the source onto the target. */
	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}
}
2384 
/* Duplicate all buckets of a non-packed source into target, compacting out
 * holes: once the first hole is hit, target indices (target_idx) start to
 * lag behind source indices (idx), and the internal pointer / any attached
 * iterator positions are remapped accordingly.  Returns the number of
 * buckets written to target.  Requires nNumUsed > 0. */
static zend_always_inline uint32_t zend_array_dup_elements(const HashTable *source, HashTable *target, bool static_keys, bool with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	const Bucket *end = p + source->nNumUsed;

	if (UNEXPECTED(HT_HAS_ITERATORS(source))) {
		zend_array_dup_ht_iterators(source, target);
	}

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			/* First hole found: switch to the compacting copy loop. */
			uint32_t target_idx = idx;

			idx++; p++;
			if (EXPECTED(!HT_HAS_ITERATORS(target))) {
				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							target->nInternalPointer = target_idx;
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			} else {
				/* Iterators track source positions; remap each one to the
				 * compacted target position as we pass it. */
				target->nNumUsed = source->nNumUsed;
				uint32_t iter_pos = zend_hash_iterators_lower_pos(target, idx);

				while (p != end) {
					if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
						if (source->nInternalPointer == idx) {
							target->nInternalPointer = target_idx;
						}
						if (UNEXPECTED(idx >= iter_pos)) {
							do {
								zend_hash_iterators_update(target, iter_pos, target_idx);
								iter_pos = zend_hash_iterators_lower_pos(target, iter_pos + 1);
							} while (iter_pos < idx);
						}
						target_idx++; q++;
					}
					idx++; p++;
				}
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx;
}
2437 
/* Create an independent duplicate of source with refcount 1.  Four cases:
 * empty source (shares the global uninitialized bucket), immutable source
 * (raw memcpy of the used region), packed source (slot-wise value copy),
 * and the general bucket case (hole-compacting element copy). */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(const HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = GC_ARRAY;

	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* Empty: no per-table storage; point at the shared sentinel. */
		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		target->nTableSize = HT_MIN_SIZE;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* Immutable source: contents cannot contain refcounted values that
		 * need per-element handling, so copy the used region wholesale. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		if (HT_IS_PACKED(source)) {
			HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_PACKED_USED_SIZE(source));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
			target->nInternalPointer = source->nInternalPointer;
			memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
		}
	} else if (HT_IS_PACKED(source)) {
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_PACKED_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		/* Clamp a past-the-end internal pointer back to the start. */
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		/* The flag specializations let the copy loops skip key refcounting
		 * and/or hole checks entirely. */
		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		/* Holes were compacted out, so used == number of elements. */
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
2525 
/* Build a new packed array containing the values of source with sequential
 * integer keys (like array_values()).  Singly-held references are unwrapped
 * to their inner value; each stored value gets its refcount bumped. */
ZEND_API HashTable* zend_array_to_list(const HashTable *source)
{
	HashTable *result = _zend_new_array(zend_hash_num_elements(source));
	zend_hash_real_init_packed(result);

	ZEND_HASH_FILL_PACKED(result) {
		zval *entry;

		ZEND_HASH_FOREACH_VAL(source, entry) {
			if (UNEXPECTED(Z_ISREF_P(entry) && Z_REFCOUNT_P(entry) == 1)) {
				entry = Z_REFVAL_P(entry);
			}
			Z_TRY_ADDREF_P(entry);
			ZEND_HASH_FILL_ADD(entry);
		} ZEND_HASH_FOREACH_END();
	} ZEND_HASH_FILL_END();

	return result;
}
2545 
2546 
/* Merge source into target.  With overwrite set, existing keys in target
 * are replaced; otherwise existing keys are kept and only new keys are
 * added.  pCopyConstructor (may be NULL) runs on each value that was
 * actually inserted/updated. */
ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, const HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
{
	uint32_t idx;
	Bucket *p;
	zval *t, *s;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (overwrite) {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue;
				}
				t = zend_hash_index_update(target, idx, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			/* Follow INDIRECT slots; they may point at UNDEF values. */
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_update(target, p->h, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	} else {
		if (HT_IS_PACKED(source)) {
			for (idx = 0; idx < source->nNumUsed; idx++) {
				s = source->arPacked + idx;
				if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
					continue;
				}
				/* zend_hash_index_add() returns NULL when the key exists. */
				t = zend_hash_index_add(target, idx, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
			return;
		}

		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_add(target, p->h, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	}
}
2631 
2632 
zend_hash_replace_checker_wrapper(HashTable * target,zval * source_data,zend_ulong h,zend_string * key,void * pParam,merge_checker_func_t merge_checker_func)2633 static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, zend_ulong h, zend_string *key, void *pParam, merge_checker_func_t merge_checker_func)
2634 {
2635 	zend_hash_key hash_key;
2636 
2637 	hash_key.h = h;
2638 	hash_key.key = key;
2639 	return merge_checker_func(target, source_data, &hash_key, pParam);
2640 }
2641 
2642 
/* Merge source into target, consulting pMergeSource (with pParam) per
 * element to decide whether it should be copied.  Only non-packed,
 * string-keyed sources are supported (see the assert and the unconditional
 * use of p->key below). */
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, const HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
	uint32_t idx;
	Bucket *p;
	zval *t;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	ZEND_ASSERT(!HT_IS_PACKED(source));
	for (idx = 0; idx < source->nNumUsed; idx++) {
		p = source->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		if (zend_hash_replace_checker_wrapper(target, &p->val, p->h, p->key, pParam, pMergeSource)) {
			t = zend_hash_update(target, p->key, &p->val);
			if (pCopyConstructor) {
				pCopyConstructor(t);
			}
		}
	}
}
2665 
2666 
2667 /* Returns the hash table data if found and NULL if not. */
zend_hash_find(const HashTable * ht,zend_string * key)2668 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2669 {
2670 	Bucket *p;
2671 
2672 	IS_CONSISTENT(ht);
2673 
2674 	(void)zend_string_hash_val(key);
2675 	p = zend_hash_find_bucket(ht, key);
2676 	return p ? &p->val : NULL;
2677 }
2678 
zend_hash_find_known_hash(const HashTable * ht,const zend_string * key)2679 ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, const zend_string *key)
2680 {
2681 	Bucket *p;
2682 
2683 	IS_CONSISTENT(ht);
2684 
2685 	p = zend_hash_find_bucket(ht, key);
2686 	return p ? &p->val : NULL;
2687 }
2688 
zend_hash_str_find(const HashTable * ht,const char * str,size_t len)2689 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2690 {
2691 	zend_ulong h;
2692 	Bucket *p;
2693 
2694 	IS_CONSISTENT(ht);
2695 
2696 	h = zend_inline_hash_func(str, len);
2697 	p = zend_hash_str_find_bucket(ht, str, len, h);
2698 	return p ? &p->val : NULL;
2699 }
2700 
zend_hash_index_find(const HashTable * ht,zend_ulong h)2701 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2702 {
2703 	Bucket *p;
2704 
2705 	IS_CONSISTENT(ht);
2706 
2707 	if (HT_IS_PACKED(ht)) {
2708 		if (h < ht->nNumUsed) {
2709 			zval *zv = ht->arPacked + h;
2710 
2711 			if (Z_TYPE_P(zv) != IS_UNDEF) {
2712 				return zv;
2713 			}
2714 		}
2715 		return NULL;
2716 	}
2717 
2718 	p = zend_hash_index_find_bucket(ht, h);
2719 	return p ? &p->val : NULL;
2720 }
2721 
_zend_hash_index_find(const HashTable * ht,zend_ulong h)2722 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2723 {
2724 	Bucket *p;
2725 
2726 	IS_CONSISTENT(ht);
2727 	ZEND_ASSERT(!HT_IS_PACKED(ht));
2728 
2729 	p = zend_hash_index_find_bucket(ht, h);
2730 	return p ? &p->val : NULL;
2731 }
2732 
/* Reset the iteration position to the first live element (or to nNumUsed
 * when the table is empty).  The assert allows writing through the table's
 * own internal pointer only when the caller holds the sole reference. */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(const HashTable *ht, HashPosition *pos)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
	*pos = _zend_hash_get_valid_pos(ht, 0);
}
2739 
2740 
/* This could be optimized further by remembering the position of the last
 * element of the list.
 */
zend_hash_internal_pointer_end_ex(const HashTable * ht,HashPosition * pos)2744 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(const HashTable *ht, HashPosition *pos)
2745 {
2746 	uint32_t idx;
2747 
2748 	IS_CONSISTENT(ht);
2749 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2750 
2751 	idx = ht->nNumUsed;
2752 	if (HT_IS_PACKED(ht)) {
2753 		while (idx > 0) {
2754 			idx--;
2755 			if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2756 				*pos = idx;
2757 				return;
2758 			}
2759 		}
2760 	} else {
2761 		while (idx > 0) {
2762 			idx--;
2763 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2764 				*pos = idx;
2765 				return;
2766 			}
2767 		}
2768 	}
2769 	*pos = ht->nNumUsed;
2770 }
2771 
2772 
zend_hash_move_forward_ex(const HashTable * ht,HashPosition * pos)2773 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(const HashTable *ht, HashPosition *pos)
2774 {
2775 	uint32_t idx;
2776 
2777 	IS_CONSISTENT(ht);
2778 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2779 
2780 	idx = _zend_hash_get_valid_pos(ht, *pos);
2781 	if (idx < ht->nNumUsed) {
2782 		if (HT_IS_PACKED(ht)) {
2783 			while (1) {
2784 				idx++;
2785 				if (idx >= ht->nNumUsed) {
2786 					*pos = ht->nNumUsed;
2787 					return SUCCESS;
2788 				}
2789 				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2790 					*pos = idx;
2791 					return SUCCESS;
2792 				}
2793 			}
2794 		} else {
2795 			while (1) {
2796 				idx++;
2797 				if (idx >= ht->nNumUsed) {
2798 					*pos = ht->nNumUsed;
2799 					return SUCCESS;
2800 				}
2801 				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2802 					*pos = idx;
2803 					return SUCCESS;
2804 				}
2805 			}
2806 		}
2807 	} else {
2808 		return FAILURE;
2809 	}
2810 }
2811 
zend_hash_move_backwards_ex(const HashTable * ht,HashPosition * pos)2812 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(const HashTable *ht, HashPosition *pos)
2813 {
2814 	uint32_t idx = *pos;
2815 
2816 	IS_CONSISTENT(ht);
2817 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2818 
2819 	if (idx < ht->nNumUsed) {
2820 		if (HT_IS_PACKED(ht)) {
2821 			while (idx > 0) {
2822 				idx--;
2823 				if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) {
2824 					*pos = idx;
2825 					return SUCCESS;
2826 				}
2827 			}
2828 		} else {
2829 			while (idx > 0) {
2830 				idx--;
2831 				if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2832 					*pos = idx;
2833 					return SUCCESS;
2834 				}
2835 			}
2836 		}
2837 		*pos = ht->nNumUsed;
2838 		return SUCCESS;
2839 	} else {
2840 		return FAILURE;
2841 	}
2842 }
2843 
2844 
2845 /* This function should be made binary safe  */
zend_hash_get_current_key_ex(const HashTable * ht,zend_string ** str_index,zend_ulong * num_index,const HashPosition * pos)2846 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, const HashPosition *pos)
2847 {
2848 	uint32_t idx;
2849 	Bucket *p;
2850 
2851 	IS_CONSISTENT(ht);
2852 	idx = _zend_hash_get_valid_pos(ht, *pos);
2853 	if (idx < ht->nNumUsed) {
2854 		if (HT_IS_PACKED(ht)) {
2855 			*num_index = idx;
2856 			return HASH_KEY_IS_LONG;
2857 		}
2858 		p = ht->arData + idx;
2859 		if (p->key) {
2860 			*str_index = p->key;
2861 			return HASH_KEY_IS_STRING;
2862 		} else {
2863 			*num_index = p->h;
2864 			return HASH_KEY_IS_LONG;
2865 		}
2866 	}
2867 	return HASH_KEY_NON_EXISTENT;
2868 }
2869 
zend_hash_get_current_key_zval_ex(const HashTable * ht,zval * key,const HashPosition * pos)2870 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, const HashPosition *pos)
2871 {
2872 	uint32_t idx;
2873 	Bucket *p;
2874 
2875 	IS_CONSISTENT(ht);
2876 	idx = _zend_hash_get_valid_pos(ht, *pos);
2877 	if (idx >= ht->nNumUsed) {
2878 		ZVAL_NULL(key);
2879 	} else {
2880 		if (HT_IS_PACKED(ht)) {
2881 			ZVAL_LONG(key, idx);
2882 			return;
2883 		}
2884 		p = ht->arData + idx;
2885 		if (p->key) {
2886 			ZVAL_STR_COPY(key, p->key);
2887 		} else {
2888 			ZVAL_LONG(key, p->h);
2889 		}
2890 	}
2891 }
2892 
zend_hash_get_current_key_type_ex(const HashTable * ht,const HashPosition * pos)2893 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(const HashTable *ht, const HashPosition *pos)
2894 {
2895 	uint32_t idx;
2896 	Bucket *p;
2897 
2898 	IS_CONSISTENT(ht);
2899 	idx = _zend_hash_get_valid_pos(ht, *pos);
2900 	if (idx < ht->nNumUsed) {
2901 		if (HT_IS_PACKED(ht)) {
2902 			return HASH_KEY_IS_LONG;
2903 		}
2904 		p = ht->arData + idx;
2905 		if (p->key) {
2906 			return HASH_KEY_IS_STRING;
2907 		} else {
2908 			return HASH_KEY_IS_LONG;
2909 		}
2910 	}
2911 	return HASH_KEY_NON_EXISTENT;
2912 }
2913 
2914 
zend_hash_get_current_data_ex(const HashTable * ht,const HashPosition * pos)2915 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(const HashTable *ht, const HashPosition *pos)
2916 {
2917 	uint32_t idx;
2918 	Bucket *p;
2919 
2920 	IS_CONSISTENT(ht);
2921 	idx = _zend_hash_get_valid_pos(ht, *pos);
2922 	if (idx < ht->nNumUsed) {
2923 		if (HT_IS_PACKED(ht)) {
2924 			return &ht->arPacked[idx];
2925 		}
2926 		p = ht->arData + idx;
2927 		return &p->val;
2928 	} else {
2929 		return NULL;
2930 	}
2931 }
2932 
zend_hash_bucket_swap(Bucket * p,Bucket * q)2933 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2934 {
2935 	zval val;
2936 	zend_ulong h;
2937 	zend_string *key;
2938 
2939 	val = p->val;
2940 	h = p->h;
2941 	key = p->key;
2942 
2943 	p->val = q->val;
2944 	p->h = q->h;
2945 	p->key = q->key;
2946 
2947 	q->val = val;
2948 	q->h = h;
2949 	q->key = key;
2950 }
2951 
zend_hash_bucket_renum_swap(Bucket * p,Bucket * q)2952 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2953 {
2954 	zval val;
2955 
2956 	val = p->val;
2957 	p->val = q->val;
2958 	q->val = val;
2959 }
2960 
zend_hash_bucket_packed_swap(Bucket * p,Bucket * q)2961 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2962 {
2963 	zval val;
2964 	zend_ulong h;
2965 
2966 	val = p->val;
2967 	h = p->h;
2968 
2969 	p->val = q->val;
2970 	p->h = q->h;
2971 
2972 	q->val = val;
2973 	q->h = h;
2974 }
2975 
/* Sorts the table in place using the supplied sort() routine and compar()
 * element comparator.  When renumber is true, all keys are discarded and the
 * elements are re-indexed 0..n-1 (sort()/usort() semantics); otherwise the
 * existing keys are preserved (asort()/uasort() semantics).
 */
ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
{
	Bucket *p;
	uint32_t i, j;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* 0 or 1 elements need no reordering; with renumber, a single element
	 * may still need its key rewritten, so only truly empty work is skipped. */
	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
		/* Doesn't require sorting */
		return;
	}

	/* Sorting operates on Bucket records, so packed tables are converted
	 * first.  NOTE(review): the TODO below is upstream's — packed tables
	 * could conceivably be sorted without this round-trip. */
	if (HT_IS_PACKED(ht)) {
		zend_hash_packed_to_hash(ht); // TODO: ???
	}

	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Store original order of elements in extra space to allow stable sorting. */
		for (i = 0; i < ht->nNumUsed; i++) {
			Z_EXTRA(ht->arData[i].val) = i;
		}
	} else {
		/* Remove holes and store original order. */
		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
			p = ht->arData + j;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			if (i != j) {
				ht->arData[i] = *p;
			}
			Z_EXTRA(ht->arData[i].val) = i;
			i++;
		}
		ht->nNumUsed = i;
	}
	/* After either branch above, i == ht->nNumUsed (the live element count). */

	if (!HT_IS_PACKED(ht)) {
		/* We broke the hash collisions chains overriding Z_NEXT() by Z_EXTRA().
		 * Reset the hash headers table as well to avoid possible inconsistent
		 * access on recursive data structures.
		 *
		 * See Zend/tests/bug63882_2.phpt
		 */
		HT_HASH_RESET(ht);
	}

	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
				(HT_IS_PACKED(ht) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));

	ht->nInternalPointer = 0;

	if (renumber) {
		/* Discard old keys: each element is keyed by its new position. */
		for (j = 0; j < i; j++) {
			p = ht->arData + j;
			p->h = j;
			if (p->key) {
				zend_string_release(p->key);
				p->key = NULL;
			}
		}

		ht->nNextFreeElement = i;
	}
	if (HT_IS_PACKED(ht)) {
		if (!renumber) {
			zend_hash_packed_to_hash(ht);
		}
	} else {
		if (renumber) {
			/* Keys are now the sequence 0..n-1, so the table can switch to
			 * the compact packed representation: copy the values out of the
			 * buckets into a fresh packed data block. */
			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
			Bucket *old_buckets = ht->arData;
			zval *zv;

			new_data = pemalloc(HT_PACKED_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
			ht->nTableMask = HT_MIN_MASK;
			HT_SET_DATA_ADDR(ht, new_data);
			p = old_buckets;
			zv = ht->arPacked;
			for (i = 0; i < ht->nTableSize; i++) {
				ZVAL_COPY_VALUE(zv, &p->val);
				zv++;
				p++;
			}
			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
			HT_HASH_RESET_PACKED(ht);
		} else {
			zend_hash_rehash(ht);
		}
	}
}
3068 
/* Core of zend_hash_compare(): compares ht1 and ht2 element-wise with
 * compar(), returning <0 / 0 / >0.  When ordered is true, keys and element
 * order must match pairwise; otherwise each element of ht1 is looked up in
 * ht2 by its key. */
static zend_always_inline int zend_hash_compare_impl(const HashTable *ht1, const HashTable *ht2, compare_func_t compar, bool ordered) {
	uint32_t idx1, idx2;
	zend_string *key1, *key2;
	zend_ulong h1, h2;
	zval *pData1, *pData2;;
	int result;

	/* A difference in size decides immediately. */
	if (ht1->nNumOfElements != ht2->nNumOfElements) {
		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
	}

	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
		/* Fetch the current element of ht1; packed slots have implicit
		 * numeric keys equal to their index. */
		if (HT_IS_PACKED(ht1)) {
			pData1 = ht1->arPacked + idx1;
			h1 = idx1;
			key1 = NULL;
		} else {
			Bucket *p = ht1->arData + idx1;
			pData1 = &p->val;
			h1 = p->h;
			key1 = p->key;
		}

		if (Z_TYPE_P(pData1) == IS_UNDEF) continue; /* skip holes in ht1 */
		if (ordered) {
			/* Advance idx2 to the next occupied slot of ht2.  Equal element
			 * counts guarantee one exists, hence the asserts. */
			if (HT_IS_PACKED(ht2)) {
				while (1) {
					ZEND_ASSERT(idx2 != ht2->nNumUsed);
					pData2 = ht2->arPacked + idx2;
					h2 = idx2;
					key2 = NULL;
					if (Z_TYPE_P(pData2) != IS_UNDEF) break;
					idx2++;
				}
			} else {
				while (1) {
					Bucket *p;
					ZEND_ASSERT(idx2 != ht2->nNumUsed);
					p = ht2->arData + idx2;
					pData2 = &p->val;
					h2 = p->h;
					key2 = p->key;
					if (Z_TYPE_P(pData2) != IS_UNDEF) break;
					idx2++;
				}
			}
			if (key1 == NULL && key2 == NULL) { /* numeric indices */
				if (h1 != h2) {
					return h1 > h2 ? 1 : -1;
				}
			} else if (key1 != NULL && key2 != NULL) { /* string indices */
				/* Compare by length first, then byte-wise. */
				if (ZSTR_LEN(key1) != ZSTR_LEN(key2)) {
					return ZSTR_LEN(key1) > ZSTR_LEN(key2) ? 1 : -1;
				}

				result = memcmp(ZSTR_VAL(key1), ZSTR_VAL(key2), ZSTR_LEN(key1));
				if (result != 0) {
					return result;
				}
			} else {
				/* Mixed key types: A string key is considered as larger */
				return key1 != NULL ? 1 : -1;
			}
			idx2++;
		} else {
			/* Unordered: a key of ht1 missing from ht2 makes ht1 "larger". */
			if (key1 == NULL) { /* numeric index */
				pData2 = zend_hash_index_find(ht2, h1);
				if (pData2 == NULL) {
					return 1;
				}
			} else { /* string index */
				pData2 = zend_hash_find(ht2, key1);
				if (pData2 == NULL) {
					return 1;
				}
			}
		}

		/* Dereference INDIRECT slots (used e.g. by symbol/property tables). */
		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
			pData1 = Z_INDIRECT_P(pData1);
		}
		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
			pData2 = Z_INDIRECT_P(pData2);
		}

		/* An UNDEF value (unset indirect slot) orders below any real value. */
		if (Z_TYPE_P(pData1) == IS_UNDEF) {
			if (Z_TYPE_P(pData2) != IS_UNDEF) {
				return -1;
			}
		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
			return 1;
		} else {
			result = compar(pData1, pData2);
			if (result != 0) {
				return result;
			}
		}
	}

	return 0;
}
3170 
zend_hash_compare(HashTable * ht1,HashTable * ht2,compare_func_t compar,bool ordered)3171 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
3172 {
3173 	int result;
3174 	IS_CONSISTENT(ht1);
3175 	IS_CONSISTENT(ht2);
3176 
3177 	if (ht1 == ht2) {
3178 		return 0;
3179 	}
3180 
3181 	/* It's enough to protect only one of the arrays.
3182 	 * The second one may be referenced from the first and this may cause
3183 	 * false recursion detection.
3184 	 */
3185 	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
3186 		zend_throw_error(NULL, "Nesting level too deep - recursive dependency?");
3187 		return ZEND_UNCOMPARABLE;
3188 	}
3189 
3190 	GC_TRY_PROTECT_RECURSION(ht1);
3191 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
3192 	GC_TRY_UNPROTECT_RECURSION(ht1);
3193 
3194 	return result;
3195 }
3196 
3197 
zend_hash_minmax(const HashTable * ht,compare_func_t compar,uint32_t flag)3198 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
3199 {
3200 	uint32_t idx;
3201 	zval *res;
3202 
3203 	IS_CONSISTENT(ht);
3204 
3205 	if (ht->nNumOfElements == 0 ) {
3206 		return NULL;
3207 	}
3208 
3209 	if (HT_IS_PACKED(ht)) {
3210 		zval *zv;
3211 
3212 		idx = 0;
3213 		while (1) {
3214 			if (idx == ht->nNumUsed) {
3215 				return NULL;
3216 			}
3217 			if (Z_TYPE(ht->arPacked[idx]) != IS_UNDEF) break;
3218 			idx++;
3219 		}
3220 		res = ht->arPacked + idx;
3221 		for (; idx < ht->nNumUsed; idx++) {
3222 			zv = ht->arPacked + idx;
3223 			if (UNEXPECTED(Z_TYPE_P(zv) == IS_UNDEF)) continue;
3224 
3225 			if (flag) {
3226 				if (compar(res, zv) < 0) { /* max */
3227 					res = zv;
3228 				}
3229 			} else {
3230 				if (compar(res, zv) > 0) { /* min */
3231 					res = zv;
3232 				}
3233 			}
3234 		}
3235 	} else {
3236 		Bucket *p;
3237 
3238 		idx = 0;
3239 		while (1) {
3240 			if (idx == ht->nNumUsed) {
3241 				return NULL;
3242 			}
3243 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
3244 			idx++;
3245 		}
3246 		res = &ht->arData[idx].val;
3247 		for (; idx < ht->nNumUsed; idx++) {
3248 			p = ht->arData + idx;
3249 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
3250 
3251 			if (flag) {
3252 				if (compar(res, &p->val) < 0) { /* max */
3253 					res = &p->val;
3254 				}
3255 			} else {
3256 				if (compar(res, &p->val) > 0) { /* min */
3257 					res = &p->val;
3258 				}
3259 			}
3260 		}
3261 	}
3262 	return res;
3263 }
3264 
/* Tries to interpret the length-byte string "key" as a canonical decimal
 * integer (no leading zeros, no '-0', within zend_long range) and stores the
 * result in *idx.  Returns 1 on success, 0 when the key is not such an
 * integer.  *idx holds the value as zend_ulong; negative numbers are stored
 * in two's-complement form. */
ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
	const char *tmp = key;

	const char *end = key + length;

	/* Optional leading minus sign. */
	if (*tmp == '-') {
		tmp++;
	}

	/* Cheap rejections before parsing any digits. */
	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
	 || (SIZEOF_ZEND_LONG == 4 &&
	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
	     *tmp > '2')) { /* overflow */
		return 0;
	}
	*idx = (*tmp - '0');
	while (1) {
		++tmp;
		if (tmp == end) {
			if (*key == '-') {
				/* Negative values: the magnitude may be up to
				 * ZEND_LONG_MAX + 1 (i.e. -ZEND_LONG_MIN), hence "*idx - 1". */
				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
					return 0;
				}
				*idx = 0 - *idx; /* negate in unsigned (two's-complement) space */
			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
				return 0;
			}
			return 1;
		}
		/* Accumulate the next decimal digit; any non-digit rejects the key. */
		if (*tmp <= '9' && *tmp >= '0') {
			*idx = (*idx * 10) + (*tmp - '0');
		} else {
			return 0;
		}
	}
}
3303 
/* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
 * and converts it to a "proptable" (contains only string keys).
 * If the symtable didn't need duplicating, its refcount is incremented.
 */
ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	/* A packed table has only integer keys, so conversion is unavoidable. */
	if (UNEXPECTED(HT_IS_PACKED(ht))) {
		goto convert;
	}

	/* Fast path: if every key is already a string, reuse the table as-is. */
	ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
		if (!str_key) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
			if (!str_key) {
				/* Stringify the integer key; drop the reference created by
				 * zend_long_to_str(), the insertion below takes its own. */
				str_key = zend_long_to_str(num_key);
				zend_string_delref(str_key);
			}
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* Unwrap references held only here so the new table
					 * stores the plain value, not the reference wrapper. */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv); /* new_ht shares ownership of the value */
				}
			} while (0);
			zend_hash_update(new_ht, str_key, zv);
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
3356 
/* Takes a "proptable" hashtable (contains only string keys) and converts it to
 * a "symtable" (contains integer and non-numeric string keys).
 * If the proptable didn't need duplicating, its refcount is incremented.
 */
ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	/* Fast scan: conversion is only needed if some string key is numeric. */
	if (!HT_IS_PACKED(ht)) {
		ZEND_HASH_MAP_FOREACH_STR_KEY(ht, str_key) {
			/* The `str_key &&` here might seem redundant: property tables should
			 * only have string keys. Unfortunately, this isn't true, at the very
			 * least because of ArrayObject, which stores a symtable where the
			 * property table should be.
			 */
			if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
				goto convert;
			}
		} ZEND_HASH_FOREACH_END();
	}

	if (always_duplicate) {
		return zend_array_dup(ht);
	}

	/* Share the table; immutable arrays are not refcounted. */
	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_MAP_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* Unwrap references held only here so the new table
					 * stores the plain value, not the reference wrapper. */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv); /* new_ht shares ownership of the value */
				}
			} while (0);
			/* Again, thank ArrayObject for `!str_key ||`. */
			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
				zend_hash_index_update(new_ht, num_key, zv);
			} else {
				zend_hash_update(new_ht, str_key, zv);
			}
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
3417