xref: /PHP-8.1/Zend/zend_hash.c (revision af77d3b8)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #if defined(__aarch64__)
26 # include <arm_neon.h>
27 #endif
28 
29 #ifdef __SSE2__
30 # include <mmintrin.h>
31 # include <emmintrin.h>
32 #endif
33 
34 #if ZEND_DEBUG
35 # define HT_ASSERT(ht, expr) \
36 	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
37 #else
38 # define HT_ASSERT(ht, expr)
39 #endif
40 
41 #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
42 
43 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
44 
45 #if ZEND_DEBUG
46 
47 #define HT_OK					0x00
48 #define HT_IS_DESTROYING		0x01
49 #define HT_DESTROYED			0x02
50 #define HT_CLEANING				0x03
51 
52 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
53 {
54 	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
55 		return;
56 	}
57 	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
58 		case HT_IS_DESTROYING:
59 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
60 			break;
61 		case HT_DESTROYED:
62 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
63 			break;
64 		case HT_CLEANING:
65 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
66 			break;
67 		default:
68 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
69 			break;
70 	}
71 	ZEND_UNREACHABLE();
72 }
73 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
74 #define SET_INCONSISTENT(n) do { \
75 		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
76 	} while (0)
77 #else
78 #define IS_CONSISTENT(a)
79 #define SET_INCONSISTENT(n)
80 #endif
81 
82 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
83 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
84 		zend_hash_do_resize(ht);					\
85 	}
86 
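/* Case-insensitive pointer lookup: the key is lowercased into a temporary
 * buffer (stack-allocated via do_alloca() for short strings) and a regular
 * zend_hash_str_find_ptr() is performed on the lowercased copy. */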
87 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
88 	void *result;
89 	char *lc_str;
90 
91 	/* Stack allocate small strings to improve performance */
92 	ALLOCA_FLAG(use_heap)
93 
94 	lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
95 	result = zend_hash_str_find_ptr(ht, lc_str, len);
96 	free_alloca(lc_str, use_heap);
97 
98 	return result;
99 }
100 
101 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
102 	void *result;
103 	zend_string *lc_key = zend_string_tolower(key);
104 	result = zend_hash_find_ptr(ht, lc_key);
105 	zend_string_release(lc_key);
106 	return result;
107 }
108 
109 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
110 
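/* Rounds the requested table size up to a power of two in the range
 * [HT_MIN_SIZE, HT_MAX_SIZE); e.g. a request for 100 slots yields 128.
 * Three equivalent implementations follow: BitScanReverse() on Windows,
 * __builtin_clz() where available, and a portable bit-smearing fallback. */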
111 static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
112 {
113 #ifdef ZEND_WIN32
114 	unsigned long index;
115 #endif
116 
117 	/* Use big enough power of 2 */
118 	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
119 	if (nSize <= HT_MIN_SIZE) {
120 		return HT_MIN_SIZE;
121 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
122 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
123 	}
124 
125 #ifdef ZEND_WIN32
126 	if (BitScanReverse(&index, nSize - 1)) {
127 		return 0x2u << ((31 - index) ^ 0x1f);
128 	} else {
129 		/* nSize is ensured to be in the valid range, fall back to it
130 		   rather than using an undefined bit scan result. */
131 		return nSize;
132 	}
133 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
134 	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
135 #else
136 	nSize -= 1;
137 	nSize |= (nSize >> 1);
138 	nSize |= (nSize >> 2);
139 	nSize |= (nSize >> 4);
140 	nSize |= (nSize >> 8);
141 	nSize |= (nSize >> 16);
142 	return nSize + 1;
143 #endif
144 }
145 
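/* Allocates storage for a packed array: buckets are addressed directly by
 * their integer index, so the translation (hash) part stays at its minimum
 * size (HT_MIN_MASK) and is merely reset to HT_INVALID_IDX. */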
146 static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
147 {
148 	void *data;
149 
150 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
151 		data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
152 	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
153 		/* Use specialized API with constant allocation amount for a particularly common case. */
154 		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
155 	} else {
156 		data = emalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
157 	}
158 	HT_SET_DATA_ADDR(ht, data);
159 	/* Don't overwrite iterator count. */
160 	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
161 	HT_HASH_RESET_PACKED(ht);
162 }
163 
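/* Allocates storage for a real hash table (non-packed). Every hash slot must
 * start out as HT_INVALID_IDX (-1); for the common minimum-size table the
 * 16 uint32_t slots are filled with four 128-bit stores on SSE2/NEON,
 * otherwise HT_HASH_RESET() does it generically. */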
164 static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
165 {
166 	void *data;
167 	uint32_t nSize = ht->nTableSize;
168 
169 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
170 
171 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
172 		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
173 	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
174 		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
175 		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
176 		HT_SET_DATA_ADDR(ht, data);
177 		/* Don't overwrite iterator count. */
178 		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
179 #ifdef __SSE2__
180 		do {
181 			__m128i xmm0 = _mm_setzero_si128();
182 			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
183 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  0), xmm0);
184 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  4), xmm0);
185 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  8), xmm0);
186 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
187 		} while (0);
188 #elif defined(__aarch64__)
189 		do {
190 			int32x4_t t = vdupq_n_s32(-1);
191 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  0), t);
192 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  4), t);
193 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  8), t);
194 			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
195 		} while (0);
196 #else
197 		HT_HASH_EX(data,  0) = -1;
198 		HT_HASH_EX(data,  1) = -1;
199 		HT_HASH_EX(data,  2) = -1;
200 		HT_HASH_EX(data,  3) = -1;
201 		HT_HASH_EX(data,  4) = -1;
202 		HT_HASH_EX(data,  5) = -1;
203 		HT_HASH_EX(data,  6) = -1;
204 		HT_HASH_EX(data,  7) = -1;
205 		HT_HASH_EX(data,  8) = -1;
206 		HT_HASH_EX(data,  9) = -1;
207 		HT_HASH_EX(data, 10) = -1;
208 		HT_HASH_EX(data, 11) = -1;
209 		HT_HASH_EX(data, 12) = -1;
210 		HT_HASH_EX(data, 13) = -1;
211 		HT_HASH_EX(data, 14) = -1;
212 		HT_HASH_EX(data, 15) = -1;
213 #endif
214 		return;
215 	} else {
216 		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
217 	}
218 	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
219 	HT_SET_DATA_ADDR(ht, data);
220 	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
221 	HT_HASH_RESET(ht);
222 }
223 
224 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
225 {
226 	HT_ASSERT_RC1(ht);
227 	ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
228 	if (packed) {
229 		zend_hash_real_init_packed_ex(ht);
230 	} else {
231 		zend_hash_real_init_mixed_ex(ht);
232 	}
233 }
234 
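/* Shared read-only data for tables that have not been materialized yet:
 * every freshly initialized HashTable points its hash part at
 * uninitialized_bucket, and zend_empty_array is the immutable empty array
 * that callers may reference instead of allocating their own. */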
235 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
236 	{HT_INVALID_IDX, HT_INVALID_IDX};
237 
238 ZEND_API const HashTable zend_empty_array = {
239 	.gc.refcount = 2,
240 	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
241 	.u.flags = HASH_FLAG_UNINITIALIZED,
242 	.nTableMask = HT_MIN_MASK,
243 	.arData = (Bucket*)&uninitialized_bucket[2],
244 	.nNumUsed = 0,
245 	.nNumOfElements = 0,
246 	.nTableSize = HT_MIN_SIZE,
247 	.nInternalPointer = 0,
248 	.nNextFreeElement = 0,
249 	.pDestructor = ZVAL_PTR_DTOR
250 };
251 
252 static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
253 {
254 	GC_SET_REFCOUNT(ht, 1);
255 	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
256 	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
257 	ht->nTableMask = HT_MIN_MASK;
258 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
259 	ht->nNumUsed = 0;
260 	ht->nNumOfElements = 0;
261 	ht->nInternalPointer = 0;
262 	ht->nNextFreeElement = ZEND_LONG_MIN;
263 	ht->pDestructor = pDestructor;
264 	ht->nTableSize = zend_hash_check_size(nSize);
265 }
266 
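/* Minimal usage sketch (illustrative only): initialize a non-persistent
 * table that destroys zval values on removal, then release it.
 *
 *     HashTable ht;
 *     _zend_hash_init(&ht, 8, ZVAL_PTR_DTOR, 0);
 *     ... insert / look up elements ...
 *     zend_hash_destroy(&ht);
 *
 * Real call sites normally go through the zend_hash_init() wrapper. */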
267 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
268 {
269 	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
270 }
271 
272 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
273 {
274 	HashTable *ht = emalloc(sizeof(HashTable));
275 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
276 	return ht;
277 }
278 
279 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
280 {
281 	HashTable *ht = emalloc(sizeof(HashTable));
282 	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
283 	return ht;
284 }
285 
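/* Builds a two-element packed array in one shot: the table is initialized,
 * real-initialized as packed, and the two buckets (indexes 0 and 1) are
 * filled directly, bypassing the generic insertion path. */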
286 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
287 {
288 	Bucket *p;
289 	HashTable *ht = emalloc(sizeof(HashTable));
290 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
291 	ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
292 	zend_hash_real_init_packed_ex(ht);
293 
294 	p = ht->arData;
295 	ZVAL_COPY_VALUE(&p->val, val1);
296 	p->h = 0;
297 	p->key = NULL;
298 
299 	p++;
300 	ZVAL_COPY_VALUE(&p->val, val2);
301 	p->h = 1;
302 	p->key = NULL;
303 	return ht;
304 }
305 
306 ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
307 {
308 	HT_ASSERT_RC1(ht);
309 	if (ht->nTableSize >= HT_MAX_SIZE) {
310 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
311 	}
312 	uint32_t newTableSize = ht->nTableSize * 2;
313 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(newTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
314 	ht->nTableSize = newTableSize;
315 }
316 
317 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
318 {
319 	IS_CONSISTENT(ht);
320 
321 	HT_ASSERT_RC1(ht);
322 	zend_hash_real_init_ex(ht, packed);
323 }
324 
325 ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
326 {
327 	IS_CONSISTENT(ht);
328 
329 	HT_ASSERT_RC1(ht);
330 	zend_hash_real_init_packed_ex(ht);
331 }
332 
333 ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
334 {
335 	IS_CONSISTENT(ht);
336 
337 	HT_ASSERT_RC1(ht);
338 	zend_hash_real_init_mixed_ex(ht);
339 }
340 
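/* Converts a packed array into a real hash table: a larger block with a
 * full translation part is allocated, the buckets are copied over, and
 * zend_hash_rehash() rebuilds the collision chains. */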
341 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
342 {
343 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
344 	Bucket *old_buckets = ht->arData;
345 	uint32_t nSize = ht->nTableSize;
346 
347 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
348 
349 	HT_ASSERT_RC1(ht);
350 	// Alloc before assign to avoid inconsistencies on OOM
351 	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
352 	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
353 	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
354 	HT_SET_DATA_ADDR(ht, new_data);
355 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
356 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
357 	zend_hash_rehash(ht);
358 }
359 
360 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
361 {
362 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
363 	Bucket *old_buckets = ht->arData;
364 
365 	HT_ASSERT_RC1(ht);
366 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
367 	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
368 	ht->nTableMask = HT_MIN_MASK;
369 	HT_SET_DATA_ADDR(ht, new_data);
370 	HT_HASH_RESET_PACKED(ht);
371 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
372 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
373 }
374 
375 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
376 {
377 	HT_ASSERT_RC1(ht);
378 
379 	if (nSize == 0) return;
380 
381 	ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
382 
383 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
384 		if (nSize > ht->nTableSize) {
385 			ht->nTableSize = zend_hash_check_size(nSize);
386 		}
387 		zend_hash_real_init(ht, packed);
388 	} else {
389 		if (packed) {
390 			ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
391 			if (nSize > ht->nTableSize) {
392 				uint32_t newTableSize = zend_hash_check_size(nSize);
393 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(newTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
394 				ht->nTableSize = newTableSize;
395 			}
396 		} else {
397 			ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
398 			if (nSize > ht->nTableSize) {
399 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
400 				Bucket *old_buckets = ht->arData;
401 				nSize = zend_hash_check_size(nSize);
402 				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
403 				ht->nTableSize = nSize;
404 				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
405 				HT_SET_DATA_ADDR(ht, new_data);
406 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
407 				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
408 				zend_hash_rehash(ht);
409 			}
410 		}
411 	}
412 }
413 
414 ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
415 {
416 	Bucket *p, *end, *arData;
417 	uint32_t nIndex;
418 
419 	arData = ht->arData;
420 	p = arData + ht->nNumUsed;
421 	end = arData + nNumUsed;
422 	ht->nNumUsed = nNumUsed;
423 	while (p != end) {
424 		p--;
425 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
426 		ht->nNumOfElements--;
427 		/* Collision pointers are always directed from higher to lower buckets */
428 #if 0
429 		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
430 			abort();
431 		}
432 #endif
433 		nIndex = p->h | ht->nTableMask;
434 		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
435 	}
436 }
437 
438 static uint32_t zend_array_recalc_elements(HashTable *ht)
439 {
440 	zval *val;
441 	uint32_t num = ht->nNumOfElements;
442 
443 	ZEND_HASH_FOREACH_VAL(ht, val) {
444 		if (Z_TYPE_P(val) == IS_INDIRECT) {
445 			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
446 				num--;
447 			}
448 		}
449 	} ZEND_HASH_FOREACH_END();
450 	return num;
451 }
452 /* }}} */
453 
454 ZEND_API uint32_t zend_array_count(HashTable *ht)
455 {
456 	uint32_t num;
457 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
458 		num = zend_array_recalc_elements(ht);
459 		if (UNEXPECTED(ht->nNumOfElements == num)) {
460 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
461 		}
462 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
463 		num = zend_array_recalc_elements(ht);
464 	} else {
465 		num = zend_hash_num_elements(ht);
466 	}
467 	return num;
468 }
469 /* }}} */
470 
471 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
472 {
473 	while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
474 		pos++;
475 	}
476 	return pos;
477 }
478 
479 static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
480 {
481 	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
482 }
483 
484 ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
485 {
486 	return _zend_hash_get_current_pos(ht);
487 }
488 
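/* Registers a position in the global iterator table EG(ht_iterators).
 * A free slot is reused when possible; otherwise the registry grows by 8
 * entries (switching from the fixed EG(ht_iterators_slots) buffer to heap
 * storage on first growth). Returns the slot index. */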
489 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
490 {
491 	HashTableIterator *iter = EG(ht_iterators);
492 	HashTableIterator *end  = iter + EG(ht_iterators_count);
493 	uint32_t idx;
494 
495 	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
496 		HT_INC_ITERATORS_COUNT(ht);
497 	}
498 	while (iter != end) {
499 		if (iter->ht == NULL) {
500 			iter->ht = ht;
501 			iter->pos = pos;
502 			idx = iter - EG(ht_iterators);
503 			if (idx + 1 > EG(ht_iterators_used)) {
504 				EG(ht_iterators_used) = idx + 1;
505 			}
506 			return idx;
507 		}
508 		iter++;
509 	}
510 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
511 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
512 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
513 	} else {
514 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
515 	}
516 	iter = EG(ht_iterators) + EG(ht_iterators_count);
517 	EG(ht_iterators_count) += 8;
518 	iter->ht = ht;
519 	iter->pos = pos;
520 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
521 	idx = iter - EG(ht_iterators);
522 	EG(ht_iterators_used) = idx + 1;
523 	return idx;
524 }
525 
526 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
527 {
528 	HashTableIterator *iter = EG(ht_iterators) + idx;
529 
530 	ZEND_ASSERT(idx != (uint32_t)-1);
531 	if (UNEXPECTED(iter->ht != ht)) {
532 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
533 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
534 			HT_DEC_ITERATORS_COUNT(iter->ht);
535 		}
536 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
537 			HT_INC_ITERATORS_COUNT(ht);
538 		}
539 		iter->ht = ht;
540 		iter->pos = _zend_hash_get_current_pos(ht);
541 	}
542 	return iter->pos;
543 }
544 
545 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
546 {
547 	HashTable *ht = Z_ARRVAL_P(array);
548 	HashTableIterator *iter = EG(ht_iterators) + idx;
549 
550 	ZEND_ASSERT(idx != (uint32_t)-1);
551 	if (UNEXPECTED(iter->ht != ht)) {
552 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
553 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
554 			HT_DEC_ITERATORS_COUNT(iter->ht);
555 		}
556 		SEPARATE_ARRAY(array);
557 		ht = Z_ARRVAL_P(array);
558 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
559 			HT_INC_ITERATORS_COUNT(ht);
560 		}
561 		iter->ht = ht;
562 		iter->pos = _zend_hash_get_current_pos(ht);
563 	}
564 	return iter->pos;
565 }
566 
567 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
568 {
569 	HashTableIterator *iter = EG(ht_iterators) + idx;
570 
571 	ZEND_ASSERT(idx != (uint32_t)-1);
572 
573 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
574 			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
575 		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
576 		HT_DEC_ITERATORS_COUNT(iter->ht);
577 	}
578 	iter->ht = NULL;
579 
580 	if (idx == EG(ht_iterators_used) - 1) {
581 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
582 			idx--;
583 		}
584 		EG(ht_iterators_used) = idx;
585 	}
586 }
587 
588 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
589 {
590 	HashTableIterator *iter = EG(ht_iterators);
591 	HashTableIterator *end  = iter + EG(ht_iterators_used);
592 
593 	while (iter != end) {
594 		if (iter->ht == ht) {
595 			iter->ht = HT_POISONED_PTR;
596 		}
597 		iter++;
598 	}
599 }
600 
601 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
602 {
603 	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
604 		_zend_hash_iterators_remove(ht);
605 	}
606 }
607 
608 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
609 {
610 	HashTableIterator *iter = EG(ht_iterators);
611 	HashTableIterator *end  = iter + EG(ht_iterators_used);
612 	HashPosition res = ht->nNumUsed;
613 
614 	while (iter != end) {
615 		if (iter->ht == ht) {
616 			if (iter->pos >= start && iter->pos < res) {
617 				res = iter->pos;
618 			}
619 		}
620 		iter++;
621 	}
622 	return res;
623 }
624 
625 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
626 {
627 	HashTableIterator *iter = EG(ht_iterators);
628 	HashTableIterator *end  = iter + EG(ht_iterators_used);
629 
630 	while (iter != end) {
631 		if (iter->ht == ht && iter->pos == from) {
632 			iter->pos = to;
633 		}
634 		iter++;
635 	}
636 }
637 
638 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
639 {
640 	HashTableIterator *iter = EG(ht_iterators);
641 	HashTableIterator *end  = iter + EG(ht_iterators_used);
642 
643 	while (iter != end) {
644 		if (iter->ht == ht) {
645 			iter->pos += step;
646 		}
647 		iter++;
648 	}
649 }
650 
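/* String-key lookup. nIndex = h | ht->nTableMask selects a hash slot: the
 * mask is the negated table size, so for nTableSize == 8 the slot index is
 * one of 0xFFFFFFF8..0xFFFFFFFF, addressing the uint32_t slots stored
 * before arData. The slot holds the head of a collision chain that is
 * walked via Z_NEXT(); interned keys are first compared by pointer. */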
651 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, bool known_hash)
652 {
653 	zend_ulong h;
654 	uint32_t nIndex;
655 	uint32_t idx;
656 	Bucket *p, *arData;
657 
658 	if (known_hash) {
659 		h = ZSTR_H(key);
660 		ZEND_ASSERT(h != 0 && "Hash must be known");
661 	} else {
662 		h = zend_string_hash_val(key);
663 	}
664 	arData = ht->arData;
665 	nIndex = h | ht->nTableMask;
666 	idx = HT_HASH_EX(arData, nIndex);
667 
668 	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
669 		return NULL;
670 	}
671 	p = HT_HASH_TO_BUCKET_EX(arData, idx);
672 	if (EXPECTED(p->key == key)) { /* check for the same interned string */
673 		return p;
674 	}
675 
676 	while (1) {
677 		if (p->h == ZSTR_H(key) &&
678 		    EXPECTED(p->key) &&
679 		    zend_string_equal_content(p->key, key)) {
680 			return p;
681 		}
682 		idx = Z_NEXT(p->val);
683 		if (idx == HT_INVALID_IDX) {
684 			return NULL;
685 		}
686 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
687 		if (p->key == key) { /* check for the same interned string */
688 			return p;
689 		}
690 	}
691 }
692 
693 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
694 {
695 	uint32_t nIndex;
696 	uint32_t idx;
697 	Bucket *p, *arData;
698 
699 	arData = ht->arData;
700 	nIndex = h | ht->nTableMask;
701 	idx = HT_HASH_EX(arData, nIndex);
702 	while (idx != HT_INVALID_IDX) {
703 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
704 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
705 		if ((p->h == h)
706 			 && p->key
707 			 && (ZSTR_LEN(p->key) == len)
708 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
709 			return p;
710 		}
711 		idx = Z_NEXT(p->val);
712 	}
713 	return NULL;
714 }
715 
716 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
717 {
718 	uint32_t nIndex;
719 	uint32_t idx;
720 	Bucket *p, *arData;
721 
722 	arData = ht->arData;
723 	nIndex = h | ht->nTableMask;
724 	idx = HT_HASH_EX(arData, nIndex);
725 	while (idx != HT_INVALID_IDX) {
726 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
727 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
728 		if (p->h == h && !p->key) {
729 			return p;
730 		}
731 		idx = Z_NEXT(p->val);
732 	}
733 	return NULL;
734 }
735 
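/* Common insert/update path for string keys. The flag bits decide what
 * happens on an existing key: HASH_ADD returns NULL, HASH_UPDATE replaces
 * the value, HASH_UPDATE_INDIRECT follows IS_INDIRECT slots, HASH_LOOKUP
 * returns the existing zval (or inserts a NULL one), and HASH_ADD_NEW
 * promises the key does not exist yet so the lookup can be skipped. */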
736 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
737 {
738 	zend_ulong h;
739 	uint32_t nIndex;
740 	uint32_t idx;
741 	Bucket *p, *arData;
742 
743 	IS_CONSISTENT(ht);
744 	HT_ASSERT_RC1(ht);
745 
746 	if (!ZSTR_IS_INTERNED(key)) {
747 		zend_string_hash_val(key);
748 	}
749 
750 	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
751 		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
752 			zend_hash_real_init_mixed(ht);
753 			goto add_to_hash;
754 		} else {
755 			zend_hash_packed_to_hash(ht);
756 		}
757 	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
758 		p = zend_hash_find_bucket(ht, key, 1);
759 
760 		if (p) {
761 			zval *data;
762 
763 			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
764 			if (flag & HASH_LOOKUP) {
765 				return &p->val;
766 			} else if (flag & HASH_ADD) {
767 				if (!(flag & HASH_UPDATE_INDIRECT)) {
768 					return NULL;
769 				}
770 				ZEND_ASSERT(&p->val != pData);
771 				data = &p->val;
772 				if (Z_TYPE_P(data) == IS_INDIRECT) {
773 					data = Z_INDIRECT_P(data);
774 					if (Z_TYPE_P(data) != IS_UNDEF) {
775 						return NULL;
776 					}
777 				} else {
778 					return NULL;
779 				}
780 			} else {
781 				ZEND_ASSERT(&p->val != pData);
782 				data = &p->val;
783 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
784 					data = Z_INDIRECT_P(data);
785 				}
786 			}
787 			if (ht->pDestructor) {
788 				ht->pDestructor(data);
789 			}
790 			ZVAL_COPY_VALUE(data, pData);
791 			return data;
792 		}
793 	}
794 
795 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
796 
797 add_to_hash:
798 	if (!ZSTR_IS_INTERNED(key)) {
799 		zend_string_addref(key);
800 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
801 	}
802 	idx = ht->nNumUsed++;
803 	ht->nNumOfElements++;
804 	arData = ht->arData;
805 	p = arData + idx;
806 	p->key = key;
807 	p->h = h = ZSTR_H(key);
808 	nIndex = h | ht->nTableMask;
809 	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
810 	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
811 	if (flag & HASH_LOOKUP) {
812 		ZVAL_NULL(&p->val);
813 	} else {
814 		ZVAL_COPY_VALUE(&p->val, pData);
815 	}
816 
817 	return &p->val;
818 }
819 
820 static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
821 {
822 	zend_string *key;
823 	uint32_t nIndex;
824 	uint32_t idx;
825 	Bucket *p;
826 
827 	IS_CONSISTENT(ht);
828 	HT_ASSERT_RC1(ht);
829 
830 	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
831 		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
832 			zend_hash_real_init_mixed(ht);
833 			goto add_to_hash;
834 		} else {
835 			zend_hash_packed_to_hash(ht);
836 		}
837 	} else if ((flag & HASH_ADD_NEW) == 0) {
838 		p = zend_hash_str_find_bucket(ht, str, len, h);
839 
840 		if (p) {
841 			zval *data;
842 
843 			if (flag & HASH_LOOKUP) {
844 				return &p->val;
845 			} else if (flag & HASH_ADD) {
846 				if (!(flag & HASH_UPDATE_INDIRECT)) {
847 					return NULL;
848 				}
849 				ZEND_ASSERT(&p->val != pData);
850 				data = &p->val;
851 				if (Z_TYPE_P(data) == IS_INDIRECT) {
852 					data = Z_INDIRECT_P(data);
853 					if (Z_TYPE_P(data) != IS_UNDEF) {
854 						return NULL;
855 					}
856 				} else {
857 					return NULL;
858 				}
859 			} else {
860 				ZEND_ASSERT(&p->val != pData);
861 				data = &p->val;
862 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
863 					data = Z_INDIRECT_P(data);
864 				}
865 			}
866 			if (ht->pDestructor) {
867 				ht->pDestructor(data);
868 			}
869 			ZVAL_COPY_VALUE(data, pData);
870 			return data;
871 		}
872 	}
873 
874 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
875 
876 add_to_hash:
877 	idx = ht->nNumUsed++;
878 	ht->nNumOfElements++;
879 	p = ht->arData + idx;
880 	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
881 #if ZEND_RC_DEBUG
882 	if (GC_FLAGS(ht) & GC_PERSISTENT_LOCAL) {
883 		GC_MAKE_PERSISTENT_LOCAL(key);
884 	}
885 #endif
886 	p->h = ZSTR_H(key) = h;
887 	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
888 	if (flag & HASH_LOOKUP) {
889 		ZVAL_NULL(&p->val);
890 	} else {
891 		ZVAL_COPY_VALUE(&p->val, pData);
892 	}
893 	nIndex = h | ht->nTableMask;
894 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
895 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
896 
897 	return &p->val;
898 }
899 
900 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
901 {
902 	if (flag == HASH_ADD) {
903 		return zend_hash_add(ht, key, pData);
904 	} else if (flag == HASH_ADD_NEW) {
905 		return zend_hash_add_new(ht, key, pData);
906 	} else if (flag == HASH_UPDATE) {
907 		return zend_hash_update(ht, key, pData);
908 	} else {
909 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
910 		return zend_hash_update_ind(ht, key, pData);
911 	}
912 }
913 
914 ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
915 {
916 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
917 }
918 
919 ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
920 {
921 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
922 }
923 
924 ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
925 {
926 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
927 }
928 
929 ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
930 {
931 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
932 }
933 
934 ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
935 {
936 	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
937 }
938 
939 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
940 {
941 	if (flag == HASH_ADD) {
942 		return zend_hash_str_add(ht, str, len, pData);
943 	} else if (flag == HASH_ADD_NEW) {
944 		return zend_hash_str_add_new(ht, str, len, pData);
945 	} else if (flag == HASH_UPDATE) {
946 		return zend_hash_str_update(ht, str, len, pData);
947 	} else {
948 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
949 		return zend_hash_str_update_ind(ht, str, len, pData);
950 	}
951 }
952 
953 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
954 {
955 	zend_ulong h = zend_hash_func(str, len);
956 
957 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
958 }
959 
960 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
961 {
962 	zend_ulong h = zend_hash_func(str, len);
963 
964 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
965 }
966 
967 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
968 {
969 	zend_ulong h = zend_hash_func(str, len);
970 
971 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
972 }
973 
974 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
975 {
976 	zend_ulong h = zend_hash_func(str, len);
977 
978 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
979 }
980 
981 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
982 {
983 	zval dummy;
984 
985 	ZVAL_NULL(&dummy);
986 	return zend_hash_index_add(ht, h, &dummy);
987 }
988 
989 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
990 {
991 	zval dummy;
992 
993 	ZVAL_NULL(&dummy);
994 	return zend_hash_add(ht, key, &dummy);
995 }
996 
997 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
998 {
999 	zval dummy;
1000 
1001 	ZVAL_NULL(&dummy);
1002 	return zend_hash_str_add(ht, str, len, &dummy);
1003 }
1004 
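/* Common insert/update path for integer keys. Packed arrays take a fast
 * path while keys stay (nearly) sequential; a key that hits a previously
 * deleted slot, or that lies too far beyond the current table size for
 * doubling to pay off, forces a conversion to a real hash table via
 * zend_hash_packed_to_hash(). */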
1005 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1006 {
1007 	uint32_t nIndex;
1008 	uint32_t idx;
1009 	Bucket *p;
1010 
1011 	IS_CONSISTENT(ht);
1012 	HT_ASSERT_RC1(ht);
1013 
1014 	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
1015 		h = 0;
1016 	}
1017 
1018 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
1019 		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
1020 		 && h < ht->nNumUsed) {
1021 			p = ht->arData + h;
1022 			if (Z_TYPE(p->val) != IS_UNDEF) {
1023 				if (flag & HASH_LOOKUP) {
1024 					return &p->val;
1025 				}
1026 replace:
1027 				if (flag & HASH_ADD) {
1028 					return NULL;
1029 				}
1030 				if (ht->pDestructor) {
1031 					ht->pDestructor(&p->val);
1032 				}
1033 				ZVAL_COPY_VALUE(&p->val, pData);
1034 				return &p->val;
1035 			} else { /* we have to keep the order :( */
1036 				goto convert_to_hash;
1037 			}
1038 		} else if (EXPECTED(h < ht->nTableSize)) {
1039 add_to_packed:
1040 			p = ht->arData + h;
1041 			/* incremental initialization of empty Buckets */
1042 			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
1043 				if (h > ht->nNumUsed) {
1044 					Bucket *q = ht->arData + ht->nNumUsed;
1045 					while (q != p) {
1046 						ZVAL_UNDEF(&q->val);
1047 						q++;
1048 					}
1049 				}
1050 			}
1051 			ht->nNextFreeElement = ht->nNumUsed = h + 1;
1052 			goto add;
1053 		} else if ((h >> 1) < ht->nTableSize &&
1054 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
1055 			zend_hash_packed_grow(ht);
1056 			goto add_to_packed;
1057 		} else {
1058 			if (ht->nNumUsed >= ht->nTableSize) {
1059 				ht->nTableSize += ht->nTableSize;
1060 			}
1061 convert_to_hash:
1062 			zend_hash_packed_to_hash(ht);
1063 		}
1064 	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
1065 		if (h < ht->nTableSize) {
1066 			zend_hash_real_init_packed_ex(ht);
1067 			goto add_to_packed;
1068 		}
1069 		zend_hash_real_init_mixed(ht);
1070 	} else {
1071 		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
1072 			p = zend_hash_index_find_bucket(ht, h);
1073 			if (p) {
1074 				if (flag & HASH_LOOKUP) {
1075 					return &p->val;
1076 				}
1077 				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
1078 				goto replace;
1079 			}
1080 		}
1081 		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
1082 	}
1083 
1084 	idx = ht->nNumUsed++;
1085 	nIndex = h | ht->nTableMask;
1086 	p = ht->arData + idx;
1087 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1088 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
1089 	if ((zend_long)h >= ht->nNextFreeElement) {
1090 		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
1091 	}
1092 add:
1093 	ht->nNumOfElements++;
1094 	p->h = h;
1095 	p->key = NULL;
1096 	if (flag & HASH_LOOKUP) {
1097 		ZVAL_NULL(&p->val);
1098 	} else {
1099 		ZVAL_COPY_VALUE(&p->val, pData);
1100 	}
1101 
1102 	return &p->val;
1103 }
1104 
1105 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1106 {
1107 	if (flag == HASH_ADD) {
1108 		return zend_hash_index_add(ht, h, pData);
1109 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1110 		return zend_hash_index_add_new(ht, h, pData);
1111 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1112 		ZEND_ASSERT(h == ht->nNextFreeElement);
1113 		return zend_hash_next_index_insert(ht, pData);
1114 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1115 		ZEND_ASSERT(h == ht->nNextFreeElement);
1116 		return zend_hash_next_index_insert_new(ht, pData);
1117 	} else {
1118 		ZEND_ASSERT(flag == HASH_UPDATE);
1119 		return zend_hash_index_update(ht, h, pData);
1120 	}
1121 }
1122 
1123 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
1124 {
1125 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
1126 }
1127 
1128 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
1129 {
1130 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
1131 }
1132 
1133 ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
1134 {
1135 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
1136 }
1137 
1138 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
1139 {
1140 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
1141 }
1142 
1143 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
1144 {
1145 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
1146 }
1147 
1148 ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
1149 {
1150 	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
1151 }
1152 
1153 ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
1154 {
1155 	uint32_t nIndex;
1156 	uint32_t idx, i;
1157 	Bucket *p, *arData;
1158 
1159 	IS_CONSISTENT(ht);
1160 	HT_ASSERT_RC1(ht);
1161 	ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
1162 
1163 	p = zend_hash_find_bucket(ht, key, 0);
1164 	if (UNEXPECTED(p)) {
1165 		return (p == b) ? &p->val : NULL;
1166 	}
1167 
1168 	if (!ZSTR_IS_INTERNED(key)) {
1169 		zend_string_addref(key);
1170 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
1171 	}
1172 
1173 	arData = ht->arData;
1174 
1175 	/* del from hash */
1176 	idx = HT_IDX_TO_HASH(b - arData);
1177 	nIndex = b->h | ht->nTableMask;
1178 	i = HT_HASH_EX(arData, nIndex);
1179 	if (i == idx) {
1180 		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
1181 	} else {
1182 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1183 		while (Z_NEXT(p->val) != idx) {
1184 			i = Z_NEXT(p->val);
1185 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1186 		}
1187 		Z_NEXT(p->val) = Z_NEXT(b->val);
1188 	}
1189 	zend_string_release(b->key);
1190 
1191 	/* add to hash */
1192 	idx = b - arData;
1193 	b->key = key;
1194 	b->h = ZSTR_H(key);
1195 	nIndex = b->h | ht->nTableMask;
1196 	idx = HT_IDX_TO_HASH(idx);
1197 	i = HT_HASH_EX(arData, nIndex);
1198 	if (i == HT_INVALID_IDX || i < idx) {
1199 		Z_NEXT(b->val) = i;
1200 		HT_HASH_EX(arData, nIndex) = idx;
1201 	} else {
1202 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1203 		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
1204 			i = Z_NEXT(p->val);
1205 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1206 		}
1207 		Z_NEXT(b->val) = Z_NEXT(p->val);
1208 		Z_NEXT(p->val) = idx;
1209 	}
1210 	return &b->val;
1211 }
1212 
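/* Called when nNumUsed reaches nTableSize. If a noticeable share of the
 * used slots are tombstones (more than nNumOfElements/32 of them), the
 * table is compacted in place by zend_hash_rehash(); otherwise the bucket
 * array is doubled and rehashed into the new translation part. */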
1213 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
1214 {
1215 
1216 	IS_CONSISTENT(ht);
1217 	HT_ASSERT_RC1(ht);
1218 
1219 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
1220 		zend_hash_rehash(ht);
1221 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
1222 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
1223 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
1224 		Bucket *old_buckets = ht->arData;
1225 
1226 		ZEND_ASSERT(HT_SIZE_TO_MASK(nSize));
1227 
1228 		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1229 		ht->nTableSize = nSize;
1230 		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
1231 		HT_SET_DATA_ADDR(ht, new_data);
1232 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
1233 		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1234 		zend_hash_rehash(ht);
1235 	} else {
1236 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
1237 	}
1238 }
1239 
1240 ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
1241 {
1242 	Bucket *p;
1243 	uint32_t nIndex, i;
1244 
1245 	IS_CONSISTENT(ht);
1246 
1247 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
1248 		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1249 			ht->nNumUsed = 0;
1250 			HT_HASH_RESET(ht);
1251 		}
1252 		return;
1253 	}
1254 
1255 	HT_HASH_RESET(ht);
1256 	i = 0;
1257 	p = ht->arData;
1258 	if (HT_IS_WITHOUT_HOLES(ht)) {
1259 		do {
1260 			nIndex = p->h | ht->nTableMask;
1261 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1262 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1263 			p++;
1264 		} while (++i < ht->nNumUsed);
1265 	} else {
1266 		uint32_t old_num_used = ht->nNumUsed;
1267 		do {
1268 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
1269 				uint32_t j = i;
1270 				Bucket *q = p;
1271 
1272 				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
1273 					while (++i < ht->nNumUsed) {
1274 						p++;
1275 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1276 							ZVAL_COPY_VALUE(&q->val, &p->val);
1277 							q->h = p->h;
1278 							nIndex = q->h | ht->nTableMask;
1279 							q->key = p->key;
1280 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1281 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1282 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1283 								ht->nInternalPointer = j;
1284 							}
1285 							q++;
1286 							j++;
1287 						}
1288 					}
1289 				} else {
1290 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, i + 1);
1291 
1292 					while (++i < ht->nNumUsed) {
1293 						p++;
1294 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1295 							ZVAL_COPY_VALUE(&q->val, &p->val);
1296 							q->h = p->h;
1297 							nIndex = q->h | ht->nTableMask;
1298 							q->key = p->key;
1299 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1300 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1301 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1302 								ht->nInternalPointer = j;
1303 							}
1304 							if (UNEXPECTED(i >= iter_pos)) {
1305 								do {
1306 									zend_hash_iterators_update(ht, iter_pos, j);
1307 									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
1308 								} while (iter_pos < i);
1309 							}
1310 							q++;
1311 							j++;
1312 						}
1313 					}
1314 				}
1315 				ht->nNumUsed = j;
1316 				break;
1317 			}
1318 			nIndex = p->h | ht->nTableMask;
1319 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1320 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1321 			p++;
1322 		} while (++i < ht->nNumUsed);
1323 
1324 		/* Migrate pointer to one past the end of the array to the new one past the end, so that
1325 		 * newly inserted elements are picked up correctly. */
1326 		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1327 			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
1328 		}
1329 	}
1330 }
1331 
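/* Core deletion helper: unlinks the bucket from its collision chain (prev
 * is its predecessor, or NULL if it is the chain head), marks the value
 * IS_UNDEF so the slot becomes a hole, trims trailing holes from nNumUsed,
 * and fixes up the internal pointer and any active iterators. The value is
 * copied to a temporary and the bucket set to IS_UNDEF before the
 * destructor runs. */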
1332 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
1333 {
1334 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1335 		if (prev) {
1336 			Z_NEXT(prev->val) = Z_NEXT(p->val);
1337 		} else {
1338 			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
1339 		}
1340 	}
1341 	idx = HT_HASH_TO_IDX(idx);
1342 	ht->nNumOfElements--;
1343 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1344 		uint32_t new_idx;
1345 
1346 		new_idx = idx;
1347 		while (1) {
1348 			new_idx++;
1349 			if (new_idx >= ht->nNumUsed) {
1350 				break;
1351 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1352 				break;
1353 			}
1354 		}
1355 		if (ht->nInternalPointer == idx) {
1356 			ht->nInternalPointer = new_idx;
1357 		}
1358 		zend_hash_iterators_update(ht, idx, new_idx);
1359 	}
1360 	if (ht->nNumUsed - 1 == idx) {
1361 		do {
1362 			ht->nNumUsed--;
1363 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
1364 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1365 	}
1366 	if (p->key) {
1367 		zend_string_release(p->key);
1368 	}
1369 	if (ht->pDestructor) {
1370 		zval tmp;
1371 		ZVAL_COPY_VALUE(&tmp, &p->val);
1372 		ZVAL_UNDEF(&p->val);
1373 		ht->pDestructor(&tmp);
1374 	} else {
1375 		ZVAL_UNDEF(&p->val);
1376 	}
1377 }
1378 
1379 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1380 {
1381 	Bucket *prev = NULL;
1382 
1383 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1384 		uint32_t nIndex = p->h | ht->nTableMask;
1385 		uint32_t i = HT_HASH(ht, nIndex);
1386 
1387 		if (i != idx) {
1388 			prev = HT_HASH_TO_BUCKET(ht, i);
1389 			while (Z_NEXT(prev->val) != idx) {
1390 				i = Z_NEXT(prev->val);
1391 				prev = HT_HASH_TO_BUCKET(ht, i);
1392 			}
1393 	 	}
1394 	}
1395 
1396 	_zend_hash_del_el_ex(ht, idx, p, prev);
1397 }
1398 
1399 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1400 {
1401 	IS_CONSISTENT(ht);
1402 	HT_ASSERT_RC1(ht);
1403 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1404 }
1405 
1406 ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1407 {
1408 	zend_ulong h;
1409 	uint32_t nIndex;
1410 	uint32_t idx;
1411 	Bucket *p;
1412 	Bucket *prev = NULL;
1413 
1414 	IS_CONSISTENT(ht);
1415 	HT_ASSERT_RC1(ht);
1416 
1417 	h = zend_string_hash_val(key);
1418 	nIndex = h | ht->nTableMask;
1419 
1420 	idx = HT_HASH(ht, nIndex);
1421 	while (idx != HT_INVALID_IDX) {
1422 		p = HT_HASH_TO_BUCKET(ht, idx);
1423 		if ((p->key == key) ||
1424 			(p->h == h &&
1425 		     p->key &&
1426 		     zend_string_equal_content(p->key, key))) {
1427 			_zend_hash_del_el_ex(ht, idx, p, prev);
1428 			return SUCCESS;
1429 		}
1430 		prev = p;
1431 		idx = Z_NEXT(p->val);
1432 	}
1433 	return FAILURE;
1434 }
1435 
1436 ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1437 {
1438 	zend_ulong h;
1439 	uint32_t nIndex;
1440 	uint32_t idx;
1441 	Bucket *p;
1442 	Bucket *prev = NULL;
1443 
1444 	IS_CONSISTENT(ht);
1445 	HT_ASSERT_RC1(ht);
1446 
1447 	h = zend_string_hash_val(key);
1448 	nIndex = h | ht->nTableMask;
1449 
1450 	idx = HT_HASH(ht, nIndex);
1451 	while (idx != HT_INVALID_IDX) {
1452 		p = HT_HASH_TO_BUCKET(ht, idx);
1453 		if ((p->key == key) ||
1454 			(p->h == h &&
1455 		     p->key &&
1456 		     zend_string_equal_content(p->key, key))) {
1457 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1458 				zval *data = Z_INDIRECT(p->val);
1459 
1460 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1461 					return FAILURE;
1462 				} else {
1463 					if (ht->pDestructor) {
1464 						zval tmp;
1465 						ZVAL_COPY_VALUE(&tmp, data);
1466 						ZVAL_UNDEF(data);
1467 						ht->pDestructor(&tmp);
1468 					} else {
1469 						ZVAL_UNDEF(data);
1470 					}
1471 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1472 				}
1473 			} else {
1474 				_zend_hash_del_el_ex(ht, idx, p, prev);
1475 			}
1476 			return SUCCESS;
1477 		}
1478 		prev = p;
1479 		idx = Z_NEXT(p->val);
1480 	}
1481 	return FAILURE;
1482 }
1483 
1484 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1485 {
1486 	zend_ulong h;
1487 	uint32_t nIndex;
1488 	uint32_t idx;
1489 	Bucket *p;
1490 	Bucket *prev = NULL;
1491 
1492 	IS_CONSISTENT(ht);
1493 	HT_ASSERT_RC1(ht);
1494 
1495 	h = zend_inline_hash_func(str, len);
1496 	nIndex = h | ht->nTableMask;
1497 
1498 	idx = HT_HASH(ht, nIndex);
1499 	while (idx != HT_INVALID_IDX) {
1500 		p = HT_HASH_TO_BUCKET(ht, idx);
1501 		if ((p->h == h)
1502 			 && p->key
1503 			 && (ZSTR_LEN(p->key) == len)
1504 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1505 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1506 				zval *data = Z_INDIRECT(p->val);
1507 
1508 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1509 					return FAILURE;
1510 				} else {
1511 					if (ht->pDestructor) {
1512 						ht->pDestructor(data);
1513 					}
1514 					ZVAL_UNDEF(data);
1515 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1516 				}
1517 			} else {
1518 				_zend_hash_del_el_ex(ht, idx, p, prev);
1519 			}
1520 			return SUCCESS;
1521 		}
1522 		prev = p;
1523 		idx = Z_NEXT(p->val);
1524 	}
1525 	return FAILURE;
1526 }
1527 
1528 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1529 {
1530 	zend_ulong h;
1531 	uint32_t nIndex;
1532 	uint32_t idx;
1533 	Bucket *p;
1534 	Bucket *prev = NULL;
1535 
1536 	IS_CONSISTENT(ht);
1537 	HT_ASSERT_RC1(ht);
1538 
1539 	h = zend_inline_hash_func(str, len);
1540 	nIndex = h | ht->nTableMask;
1541 
1542 	idx = HT_HASH(ht, nIndex);
1543 	while (idx != HT_INVALID_IDX) {
1544 		p = HT_HASH_TO_BUCKET(ht, idx);
1545 		if ((p->h == h)
1546 			 && p->key
1547 			 && (ZSTR_LEN(p->key) == len)
1548 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1549 			_zend_hash_del_el_ex(ht, idx, p, prev);
1550 			return SUCCESS;
1551 		}
1552 		prev = p;
1553 		idx = Z_NEXT(p->val);
1554 	}
1555 	return FAILURE;
1556 }
1557 
1558 ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1559 {
1560 	uint32_t nIndex;
1561 	uint32_t idx;
1562 	Bucket *p;
1563 	Bucket *prev = NULL;
1564 
1565 	IS_CONSISTENT(ht);
1566 	HT_ASSERT_RC1(ht);
1567 
1568 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
1569 		if (h < ht->nNumUsed) {
1570 			p = ht->arData + h;
1571 			if (Z_TYPE(p->val) != IS_UNDEF) {
1572 				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1573 				return SUCCESS;
1574 			}
1575 		}
1576 		return FAILURE;
1577 	}
1578 	nIndex = h | ht->nTableMask;
1579 
1580 	idx = HT_HASH(ht, nIndex);
1581 	while (idx != HT_INVALID_IDX) {
1582 		p = HT_HASH_TO_BUCKET(ht, idx);
1583 		if ((p->h == h) && (p->key == NULL)) {
1584 			_zend_hash_del_el_ex(ht, idx, p, prev);
1585 			return SUCCESS;
1586 		}
1587 		prev = p;
1588 		idx = Z_NEXT(p->val);
1589 	}
1590 	return FAILURE;
1591 }
1592 
1593 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1594 {
1595 	Bucket *p, *end;
1596 
1597 	IS_CONSISTENT(ht);
1598 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1599 
1600 	if (ht->nNumUsed) {
1601 		p = ht->arData;
1602 		end = p + ht->nNumUsed;
1603 		if (ht->pDestructor) {
1604 			SET_INCONSISTENT(HT_IS_DESTROYING);
1605 
1606 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1607 				if (HT_IS_WITHOUT_HOLES(ht)) {
1608 					do {
1609 						ht->pDestructor(&p->val);
1610 					} while (++p != end);
1611 				} else {
1612 					do {
1613 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1614 							ht->pDestructor(&p->val);
1615 						}
1616 					} while (++p != end);
1617 				}
1618 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1619 				do {
1620 					ht->pDestructor(&p->val);
1621 					if (EXPECTED(p->key)) {
1622 						zend_string_release(p->key);
1623 					}
1624 				} while (++p != end);
1625 			} else {
1626 				do {
1627 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1628 						ht->pDestructor(&p->val);
1629 						if (EXPECTED(p->key)) {
1630 							zend_string_release(p->key);
1631 						}
1632 					}
1633 				} while (++p != end);
1634 			}
1635 
1636 			SET_INCONSISTENT(HT_DESTROYED);
1637 		} else {
1638 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1639 				do {
1640 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1641 						if (EXPECTED(p->key)) {
1642 							zend_string_release(p->key);
1643 						}
1644 					}
1645 				} while (++p != end);
1646 			}
1647 		}
1648 		zend_hash_iterators_remove(ht);
1649 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1650 		return;
1651 	}
1652 	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1653 }
1654 
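/* Frees an ordinary (refcounted) array, including the HashTable struct
 * itself. Values are released with i_zval_ptr_dtor() directly; if a custom
 * destructor was installed, the generic zend_hash_destroy() path is used
 * instead. */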
1655 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1656 {
1657 	Bucket *p, *end;
1658 
1659 	IS_CONSISTENT(ht);
1660 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1661 
1662 	/* break possible cycles */
1663 	GC_REMOVE_FROM_BUFFER(ht);
1664 	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;
1665 
1666 	if (ht->nNumUsed) {
1667 		/* In some rare cases destructors of regular arrays may be changed */
1668 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1669 			zend_hash_destroy(ht);
1670 			goto free_ht;
1671 		}
1672 
1673 		p = ht->arData;
1674 		end = p + ht->nNumUsed;
1675 		SET_INCONSISTENT(HT_IS_DESTROYING);
1676 
1677 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1678 			do {
1679 				i_zval_ptr_dtor(&p->val);
1680 			} while (++p != end);
1681 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1682 			do {
1683 				i_zval_ptr_dtor(&p->val);
1684 				if (EXPECTED(p->key)) {
1685 					zend_string_release_ex(p->key, 0);
1686 				}
1687 			} while (++p != end);
1688 		} else {
1689 			do {
1690 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1691 					i_zval_ptr_dtor(&p->val);
1692 					if (EXPECTED(p->key)) {
1693 						zend_string_release_ex(p->key, 0);
1694 					}
1695 				}
1696 			} while (++p != end);
1697 		}
1698 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1699 		goto free_ht;
1700 	}
1701 	SET_INCONSISTENT(HT_DESTROYED);
1702 	efree(HT_GET_DATA_ADDR(ht));
1703 free_ht:
1704 	zend_hash_iterators_remove(ht);
1705 	FREE_HASHTABLE(ht);
1706 }
1707 
1708 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1709 {
1710 	Bucket *p, *end;
1711 
1712 	IS_CONSISTENT(ht);
1713 	HT_ASSERT_RC1(ht);
1714 
1715 	if (ht->nNumUsed) {
1716 		p = ht->arData;
1717 		end = p + ht->nNumUsed;
1718 		if (ht->pDestructor) {
1719 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1720 				if (HT_IS_WITHOUT_HOLES(ht)) {
1721 					do {
1722 						ht->pDestructor(&p->val);
1723 					} while (++p != end);
1724 				} else {
1725 					do {
1726 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1727 							ht->pDestructor(&p->val);
1728 						}
1729 					} while (++p != end);
1730 				}
1731 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1732 				do {
1733 					ht->pDestructor(&p->val);
1734 					if (EXPECTED(p->key)) {
1735 						zend_string_release(p->key);
1736 					}
1737 				} while (++p != end);
1738 			} else {
1739 				do {
1740 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1741 						ht->pDestructor(&p->val);
1742 						if (EXPECTED(p->key)) {
1743 							zend_string_release(p->key);
1744 						}
1745 					}
1746 				} while (++p != end);
1747 			}
1748 		} else {
1749 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1750 				if (HT_IS_WITHOUT_HOLES(ht)) {
1751 					do {
1752 						if (EXPECTED(p->key)) {
1753 							zend_string_release(p->key);
1754 						}
1755 					} while (++p != end);
1756 				} else {
1757 					do {
1758 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1759 							if (EXPECTED(p->key)) {
1760 								zend_string_release(p->key);
1761 							}
1762 						}
1763 					} while (++p != end);
1764 				}
1765 			}
1766 		}
1767 		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1768 			HT_HASH_RESET(ht);
1769 		}
1770 	}
1771 	ht->nNumUsed = 0;
1772 	ht->nNumOfElements = 0;
1773 	ht->nNextFreeElement = ZEND_LONG_MIN;
1774 	ht->nInternalPointer = 0;
1775 }
1776 
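/* Illustrative sketch (editor's addition, not part of zend_hash.c): unlike
 * zend_hash_destroy(), zend_hash_clean() only empties the table (running
 * destructors, releasing string keys and resetting the hash head slots); the
 * allocated bucket array is kept, so the table can be reused immediately.
 * The example_* name is hypothetical.
 */
static void example_clean_then_reuse(HashTable *ht)
{
	zval tmp;

	zend_hash_clean(ht);                        /* dtors run, storage kept */
	ZEND_ASSERT(zend_hash_num_elements(ht) == 0);

	ZVAL_LONG(&tmp, 1);
	zend_hash_index_add(ht, 0, &tmp);           /* still usable afterwards */
}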
1777 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1778 {
1779 	Bucket *p, *end;
1780 
1781 	IS_CONSISTENT(ht);
1782 	HT_ASSERT_RC1(ht);
1783 
1784 	if (ht->nNumUsed) {
1785 		p = ht->arData;
1786 		end = p + ht->nNumUsed;
1787 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1788 			do {
1789 				i_zval_ptr_dtor(&p->val);
1790 			} while (++p != end);
1791 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1792 			do {
1793 				i_zval_ptr_dtor(&p->val);
1794 				if (EXPECTED(p->key)) {
1795 					zend_string_release(p->key);
1796 				}
1797 			} while (++p != end);
1798 		} else {
1799 			do {
1800 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1801 					i_zval_ptr_dtor(&p->val);
1802 					if (EXPECTED(p->key)) {
1803 						zend_string_release(p->key);
1804 					}
1805 				}
1806 			} while (++p != end);
1807 		}
1808 		HT_HASH_RESET(ht);
1809 	}
1810 	ht->nNumUsed = 0;
1811 	ht->nNumOfElements = 0;
1812 	ht->nNextFreeElement = ZEND_LONG_MIN;
1813 	ht->nInternalPointer = 0;
1814 }
1815 
1816 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1817 {
1818 	uint32_t idx;
1819 	Bucket *p;
1820 
1821 	IS_CONSISTENT(ht);
1822 	HT_ASSERT_RC1(ht);
1823 
1824 	p = ht->arData;
1825 	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1826 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1827 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1828 	}
1829 	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1830 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1831 	}
1832 
1833 	SET_INCONSISTENT(HT_DESTROYED);
1834 }
1835 
1836 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1837 {
1838 	uint32_t idx;
1839 	Bucket *p;
1840 
1841 	IS_CONSISTENT(ht);
1842 	HT_ASSERT_RC1(ht);
1843 
1844 	idx = ht->nNumUsed;
1845 	p = ht->arData + ht->nNumUsed;
1846 	while (idx > 0) {
1847 		idx--;
1848 		p--;
1849 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1850 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1851 	}
1852 
1853 	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1854 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1855 	}
1856 
1857 	SET_INCONSISTENT(HT_DESTROYED);
1858 }
1859 
1860 /* This is used to iterate over the elements of a hashtable and selectively
1861  * delete certain entries. apply_func() receives the data and decides if the
1862  * entry should be deleted or whether iteration should be stopped. The
1863  * following three return codes are possible:
1864  * ZEND_HASH_APPLY_KEEP   - continue iteration
1865  * ZEND_HASH_APPLY_STOP   - stop iteration
1866  * ZEND_HASH_APPLY_REMOVE - delete the element; may be combined with ZEND_HASH_APPLY_STOP
1867  */
1868 
1869 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1870 {
1871 	uint32_t idx;
1872 	Bucket *p;
1873 	int result;
1874 
1875 	IS_CONSISTENT(ht);
1876 
1877 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1878 		p = ht->arData + idx;
1879 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1880 		result = apply_func(&p->val);
1881 
1882 		if (result & ZEND_HASH_APPLY_REMOVE) {
1883 			HT_ASSERT_RC1(ht);
1884 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1885 		}
1886 		if (result & ZEND_HASH_APPLY_STOP) {
1887 			break;
1888 		}
1889 	}
1890 }
1891 
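/* Illustrative sketch (editor's addition, not part of zend_hash.c): an
 * apply_func_t callback using the return-code contract described above.  The
 * codes are bit flags, so ZEND_HASH_APPLY_REMOVE may be combined with
 * ZEND_HASH_APPLY_STOP to delete the current element and then stop iterating.
 * The example_* name is hypothetical.
 */
static int example_drop_nulls(zval *zv)
{
	if (Z_TYPE_P(zv) == IS_NULL) {
		return ZEND_HASH_APPLY_REMOVE;                         /* delete, continue */
	}
	if (Z_TYPE_P(zv) == IS_FALSE) {
		return ZEND_HASH_APPLY_REMOVE | ZEND_HASH_APPLY_STOP;  /* delete, stop */
	}
	return ZEND_HASH_APPLY_KEEP;                               /* keep, continue */
}

/* Usage: zend_hash_apply(ht, example_drop_nulls); */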
1892 
1893 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1894 {
1895 	uint32_t idx;
1896 	Bucket *p;
1897 	int result;
1898 
1899 	IS_CONSISTENT(ht);
1900 
1901 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1902 		p = ht->arData + idx;
1903 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1904 		result = apply_func(&p->val, argument);
1905 
1906 		if (result & ZEND_HASH_APPLY_REMOVE) {
1907 			HT_ASSERT_RC1(ht);
1908 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1909 		}
1910 		if (result & ZEND_HASH_APPLY_STOP) {
1911 			break;
1912 		}
1913 	}
1914 }
1915 
1916 
1917 ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1918 {
1919 	uint32_t idx;
1920 	Bucket *p;
1921 	va_list args;
1922 	zend_hash_key hash_key;
1923 	int result;
1924 
1925 	IS_CONSISTENT(ht);
1926 
1927 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1928 		p = ht->arData + idx;
1929 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1930 		va_start(args, num_args);
1931 		hash_key.h = p->h;
1932 		hash_key.key = p->key;
1933 
1934 		result = apply_func(&p->val, num_args, args, &hash_key);
1935 
1936 		if (result & ZEND_HASH_APPLY_REMOVE) {
1937 			HT_ASSERT_RC1(ht);
1938 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1939 		}
1940 		if (result & ZEND_HASH_APPLY_STOP) {
1941 			va_end(args);
1942 			break;
1943 		}
1944 		va_end(args);
1945 	}
1946 }
1947 
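/* Illustrative sketch (editor's addition, not part of zend_hash.c): an
 * apply_func_args_t callback for zend_hash_apply_with_arguments().  The
 * function above re-packs the variadic arguments into a fresh va_list for
 * every element, so the callback may consume them each time.  The example_*
 * name and the threshold argument are hypothetical.
 */
static int example_drop_below(zval *zv, int num_args, va_list args, zend_hash_key *hash_key)
{
	zend_long threshold = va_arg(args, zend_long);

	(void) num_args;
	if (hash_key->key == NULL                /* numeric keys only */
	 && Z_TYPE_P(zv) == IS_LONG
	 && Z_LVAL_P(zv) < threshold) {
		return ZEND_HASH_APPLY_REMOVE;
	}
	return ZEND_HASH_APPLY_KEEP;
}

/* Usage: zend_hash_apply_with_arguments(ht, example_drop_below, 1, (zend_long) 10); */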
1948 
1949 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1950 {
1951 	uint32_t idx;
1952 	Bucket *p;
1953 	int result;
1954 
1955 	IS_CONSISTENT(ht);
1956 
1957 	idx = ht->nNumUsed;
1958 	while (idx > 0) {
1959 		idx--;
1960 		p = ht->arData + idx;
1961 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1962 
1963 		result = apply_func(&p->val);
1964 
1965 		if (result & ZEND_HASH_APPLY_REMOVE) {
1966 			HT_ASSERT_RC1(ht);
1967 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1968 		}
1969 		if (result & ZEND_HASH_APPLY_STOP) {
1970 			break;
1971 		}
1972 	}
1973 }
1974 
1975 
1976 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1977 {
1978 	uint32_t idx;
1979 	Bucket *p;
1980 	zval *new_entry, *data;
1981 
1982 	IS_CONSISTENT(source);
1983 	IS_CONSISTENT(target);
1984 	HT_ASSERT_RC1(target);
1985 
1986 	for (idx = 0; idx < source->nNumUsed; idx++) {
1987 		p = source->arData + idx;
1988 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1989 
1990 		/* INDIRECT element may point to UNDEF-ined slots */
1991 		data = &p->val;
1992 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1993 			data = Z_INDIRECT_P(data);
1994 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1995 				continue;
1996 			}
1997 		}
1998 		if (p->key) {
1999 			new_entry = zend_hash_update(target, p->key, data);
2000 		} else {
2001 			new_entry = zend_hash_index_update(target, p->h, data);
2002 		}
2003 		if (pCopyConstructor) {
2004 			pCopyConstructor(new_entry);
2005 		}
2006 	}
2007 }
2008 
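/* Illustrative sketch (editor's addition, not part of zend_hash.c): copying
 * one table into another.  zval_add_ref() (zend_variables.h) is the usual
 * copy constructor when both tables should own a reference to each value.
 * The example_* name is hypothetical.
 */
static void example_copy(HashTable *source)
{
	HashTable *target;

	ALLOC_HASHTABLE(target);
	zend_hash_init(target, zend_hash_num_elements(source), NULL, ZVAL_PTR_DTOR, 0);
	zend_hash_copy(target, source, zval_add_ref);
	/* ... use target ..., then: zend_hash_destroy(target); FREE_HASHTABLE(target); */
}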
2009 
2010 static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
2011 {
2012 	zval *data = &p->val;
2013 
2014 	if (with_holes) {
2015 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
2016 			data = Z_INDIRECT_P(data);
2017 		}
2018 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
2019 			return 0;
2020 		}
2021 	} else if (!packed) {
2022 		/* INDIRECT element may point to UNDEF-ined slots */
2023 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
2024 			data = Z_INDIRECT_P(data);
2025 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
2026 				return 0;
2027 			}
2028 		}
2029 	}
2030 
2031 	do {
2032 		if (Z_OPT_REFCOUNTED_P(data)) {
2033 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
2034 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
2035 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
2036 				data = Z_REFVAL_P(data);
2037 				if (!Z_OPT_REFCOUNTED_P(data)) {
2038 					break;
2039 				}
2040 			}
2041 			Z_ADDREF_P(data);
2042 		}
2043 	} while (0);
2044 	ZVAL_COPY_VALUE(&q->val, data);
2045 
2046 	q->h = p->h;
2047 	if (packed) {
2048 		q->key = NULL;
2049 	} else {
2050 		uint32_t nIndex;
2051 
2052 		q->key = p->key;
2053 		if (!static_keys && q->key) {
2054 			zend_string_addref(q->key);
2055 		}
2056 
2057 		nIndex = q->h | target->nTableMask;
2058 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
2059 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
2060 	}
2061 	return 1;
2062 }
2063 
2064 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
2065 {
2066 	Bucket *p = source->arData;
2067 	Bucket *q = target->arData;
2068 	Bucket *end = p + source->nNumUsed;
2069 
2070 	do {
2071 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
2072 			if (with_holes) {
2073 				ZVAL_UNDEF(&q->val);
2074 			}
2075 		}
2076 		p++; q++;
2077 	} while (p != end);
2078 }
2079 
2080 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
2081 {
2082 	uint32_t idx = 0;
2083 	Bucket *p = source->arData;
2084 	Bucket *q = target->arData;
2085 	Bucket *end = p + source->nNumUsed;
2086 
2087 	do {
2088 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
2089 			uint32_t target_idx = idx;
2090 
2091 			idx++; p++;
2092 			while (p != end) {
2093 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
2094 					if (source->nInternalPointer == idx) {
2095 						target->nInternalPointer = target_idx;
2096 					}
2097 					target_idx++; q++;
2098 				}
2099 				idx++; p++;
2100 			}
2101 			return target_idx;
2102 		}
2103 		idx++; p++; q++;
2104 	} while (p != end);
2105 	return idx;
2106 }
2107 
2108 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
2109 {
2110 	uint32_t idx;
2111 	HashTable *target;
2112 
2113 	IS_CONSISTENT(source);
2114 
2115 	ALLOC_HASHTABLE(target);
2116 	GC_SET_REFCOUNT(target, 1);
2117 	GC_TYPE_INFO(target) = GC_ARRAY;
2118 
2119 	target->pDestructor = ZVAL_PTR_DTOR;
2120 
2121 	if (source->nNumOfElements == 0) {
2122 		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
2123 		target->nTableMask = HT_MIN_MASK;
2124 		target->nNumUsed = 0;
2125 		target->nNumOfElements = 0;
2126 		target->nNextFreeElement = source->nNextFreeElement;
2127 		target->nInternalPointer = 0;
2128 		target->nTableSize = HT_MIN_SIZE;
2129 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
2130 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
2131 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2132 		target->nTableMask = source->nTableMask;
2133 		target->nNumUsed = source->nNumUsed;
2134 		target->nNumOfElements = source->nNumOfElements;
2135 		target->nNextFreeElement = source->nNextFreeElement;
2136 		target->nTableSize = source->nTableSize;
2137 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
2138 		target->nInternalPointer = source->nInternalPointer;
2139 		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
2140 	} else if (HT_FLAGS(source) & HASH_FLAG_PACKED) {
2141 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2142 		target->nTableMask = HT_MIN_MASK;
2143 		target->nNumUsed = source->nNumUsed;
2144 		target->nNumOfElements = source->nNumOfElements;
2145 		target->nNextFreeElement = source->nNextFreeElement;
2146 		target->nTableSize = source->nTableSize;
2147 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
2148 		target->nInternalPointer =
2149 			(source->nInternalPointer < source->nNumUsed) ?
2150 				source->nInternalPointer : 0;
2151 
2152 		HT_HASH_RESET_PACKED(target);
2153 
2154 		if (HT_IS_WITHOUT_HOLES(target)) {
2155 			zend_array_dup_packed_elements(source, target, 0);
2156 		} else {
2157 			zend_array_dup_packed_elements(source, target, 1);
2158 		}
2159 	} else {
2160 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2161 		target->nTableMask = source->nTableMask;
2162 		target->nNextFreeElement = source->nNextFreeElement;
2163 		target->nInternalPointer =
2164 			(source->nInternalPointer < source->nNumUsed) ?
2165 				source->nInternalPointer : 0;
2166 
2167 		target->nTableSize = source->nTableSize;
2168 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
2169 		HT_HASH_RESET(target);
2170 
2171 		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
2172 			if (HT_IS_WITHOUT_HOLES(source)) {
2173 				idx = zend_array_dup_elements(source, target, 1, 0);
2174 			} else {
2175 				idx = zend_array_dup_elements(source, target, 1, 1);
2176 			}
2177 		} else {
2178 			if (HT_IS_WITHOUT_HOLES(source)) {
2179 				idx = zend_array_dup_elements(source, target, 0, 0);
2180 			} else {
2181 				idx = zend_array_dup_elements(source, target, 0, 1);
2182 			}
2183 		}
2184 		target->nNumUsed = idx;
2185 		target->nNumOfElements = idx;
2186 	}
2187 	return target;
2188 }
2189 
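/* Illustrative sketch (editor's addition, not part of zend_hash.c): taking a
 * private copy of an array held in a zval.  zend_array_dup() add-refs each
 * refcounted value (collapsing unshared references as above), so the copy can
 * be modified without affecting the original.  The example_* name is
 * hypothetical.
 */
static HashTable *example_private_copy(zval *array_zv)
{
	ZEND_ASSERT(Z_TYPE_P(array_zv) == IS_ARRAY);
	return zend_array_dup(Z_ARRVAL_P(array_zv));
}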
2190 
2191 ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
2192 {
2193 	uint32_t idx;
2194 	Bucket *p;
2195 	zval *t, *s;
2196 
2197 	IS_CONSISTENT(source);
2198 	IS_CONSISTENT(target);
2199 	HT_ASSERT_RC1(target);
2200 
2201 	if (overwrite) {
2202 		for (idx = 0; idx < source->nNumUsed; idx++) {
2203 			p = source->arData + idx;
2204 			s = &p->val;
2205 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2206 				s = Z_INDIRECT_P(s);
2207 			}
2208 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2209 				continue;
2210 			}
2211 			if (p->key) {
2212 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
2213 				if (pCopyConstructor) {
2214 					pCopyConstructor(t);
2215 				}
2216 			} else {
2217 				t = zend_hash_index_update(target, p->h, s);
2218 				if (pCopyConstructor) {
2219 					pCopyConstructor(t);
2220 				}
2221 			}
2222 		}
2223 	} else {
2224 		for (idx = 0; idx < source->nNumUsed; idx++) {
2225 			p = source->arData + idx;
2226 			s = &p->val;
2227 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2228 				s = Z_INDIRECT_P(s);
2229 			}
2230 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2231 				continue;
2232 			}
2233 			if (p->key) {
2234 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
2235 				if (t && pCopyConstructor) {
2236 					pCopyConstructor(t);
2237 				}
2238 			} else {
2239 				t = zend_hash_index_add(target, p->h, s);
2240 				if (t && pCopyConstructor) {
2241 					pCopyConstructor(t);
2242 				}
2243 			}
2244 		}
2245 	}
2246 }
2247 
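/* Illustrative sketch (editor's addition, not part of zend_hash.c): merging a
 * table of defaults into an options table.  With overwrite == false only keys
 * missing from the options table are added, so existing entries win; with
 * overwrite == true the defaults would replace them.  The example_* name is
 * hypothetical.
 */
static void example_apply_defaults(HashTable *options, HashTable *defaults)
{
	zend_hash_merge(options, defaults, zval_add_ref, /* overwrite */ 0);
}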
2248 
2249 static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
2250 {
2251 	zend_hash_key hash_key;
2252 
2253 	hash_key.h = p->h;
2254 	hash_key.key = p->key;
2255 	return merge_checker_func(target, source_data, &hash_key, pParam);
2256 }
2257 
2258 
2259 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
2260 {
2261 	uint32_t idx;
2262 	Bucket *p;
2263 	zval *t;
2264 
2265 	IS_CONSISTENT(source);
2266 	IS_CONSISTENT(target);
2267 	HT_ASSERT_RC1(target);
2268 
2269 	for (idx = 0; idx < source->nNumUsed; idx++) {
2270 		p = source->arData + idx;
2271 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2272 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
2273 			t = zend_hash_update(target, p->key, &p->val);
2274 			if (pCopyConstructor) {
2275 				pCopyConstructor(t);
2276 			}
2277 		}
2278 	}
2279 }
2280 
2281 
2282 /* Returns the hash table data if found and NULL if not. */
2283 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2284 {
2285 	Bucket *p;
2286 
2287 	IS_CONSISTENT(ht);
2288 
2289 	p = zend_hash_find_bucket(ht, key, 0);
2290 	return p ? &p->val : NULL;
2291 }
2292 
2293 ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, zend_string *key)
2294 {
2295 	Bucket *p;
2296 
2297 	IS_CONSISTENT(ht);
2298 
2299 	p = zend_hash_find_bucket(ht, key, 1);
2300 	return p ? &p->val : NULL;
2301 }
2302 
2303 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2304 {
2305 	zend_ulong h;
2306 	Bucket *p;
2307 
2308 	IS_CONSISTENT(ht);
2309 
2310 	h = zend_inline_hash_func(str, len);
2311 	p = zend_hash_str_find_bucket(ht, str, len, h);
2312 	return p ? &p->val : NULL;
2313 }
2314 
2315 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2316 {
2317 	Bucket *p;
2318 
2319 	IS_CONSISTENT(ht);
2320 
2321 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2322 		if (h < ht->nNumUsed) {
2323 			p = ht->arData + h;
2324 			if (Z_TYPE(p->val) != IS_UNDEF) {
2325 				return &p->val;
2326 			}
2327 		}
2328 		return NULL;
2329 	}
2330 
2331 	p = zend_hash_index_find_bucket(ht, h);
2332 	return p ? &p->val : NULL;
2333 }
2334 
2335 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2336 {
2337 	Bucket *p;
2338 
2339 	IS_CONSISTENT(ht);
2340 
2341 	p = zend_hash_index_find_bucket(ht, h);
2342 	return p ? &p->val : NULL;
2343 }
2344 
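/* Illustrative sketch (editor's addition, not part of zend_hash.c): the common
 * lookup entry points defined above.  Each returns a pointer into the bucket
 * array (valid only until the table is modified) or NULL when the key is
 * absent.  The example_* name and the "default" key are hypothetical.
 */
static zend_long example_lookup(const HashTable *ht, zend_string *name)
{
	zval *zv = zend_hash_find(ht, name);                       /* zend_string key */

	if (!zv) {
		zv = zend_hash_str_find(ht, "default", sizeof("default") - 1);
	}
	if (!zv) {
		zv = zend_hash_index_find(ht, 0);                      /* integer key */
	}
	return (zv && Z_TYPE_P(zv) == IS_LONG) ? Z_LVAL_P(zv) : 0;
}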
2345 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2346 {
2347 	IS_CONSISTENT(ht);
2348 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2349 	*pos = _zend_hash_get_valid_pos(ht, 0);
2350 }
2351 
2352 
2353 /* This function could be significantly optimized by remembering
2354  * the end of the list.
2355  */
2356 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2357 {
2358 	uint32_t idx;
2359 
2360 	IS_CONSISTENT(ht);
2361 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2362 
2363 	idx = ht->nNumUsed;
2364 	while (idx > 0) {
2365 		idx--;
2366 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2367 			*pos = idx;
2368 			return;
2369 		}
2370 	}
2371 	*pos = ht->nNumUsed;
2372 }
2373 
2374 
2375 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2376 {
2377 	uint32_t idx;
2378 
2379 	IS_CONSISTENT(ht);
2380 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2381 
2382 	idx = _zend_hash_get_valid_pos(ht, *pos);
2383 	if (idx < ht->nNumUsed) {
2384 		while (1) {
2385 			idx++;
2386 			if (idx >= ht->nNumUsed) {
2387 				*pos = ht->nNumUsed;
2388 				return SUCCESS;
2389 			}
2390 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2391 				*pos = idx;
2392 				return SUCCESS;
2393 			}
2394 		}
2395 	} else {
2396 		return FAILURE;
2397 	}
2398 }
2399 
2400 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2401 {
2402 	uint32_t idx = *pos;
2403 
2404 	IS_CONSISTENT(ht);
2405 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2406 
2407 	if (idx < ht->nNumUsed) {
2408 		while (idx > 0) {
2409 			idx--;
2410 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2411 				*pos = idx;
2412 				return SUCCESS;
2413 			}
2414 		}
2415 		*pos = ht->nNumUsed;
2416 		return SUCCESS;
2417 	} else {
2418 		return FAILURE;
2419 	}
2420 }
2421 
2422 
2423 /* This function should be made binary safe */
2424 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2425 {
2426 	uint32_t idx;
2427 	Bucket *p;
2428 
2429 	IS_CONSISTENT(ht);
2430 	idx = _zend_hash_get_valid_pos(ht, *pos);
2431 	if (idx < ht->nNumUsed) {
2432 		p = ht->arData + idx;
2433 		if (p->key) {
2434 			*str_index = p->key;
2435 			return HASH_KEY_IS_STRING;
2436 		} else {
2437 			*num_index = p->h;
2438 			return HASH_KEY_IS_LONG;
2439 		}
2440 	}
2441 	return HASH_KEY_NON_EXISTENT;
2442 }
2443 
2444 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2445 {
2446 	uint32_t idx;
2447 	Bucket *p;
2448 
2449 	IS_CONSISTENT(ht);
2450 	idx = _zend_hash_get_valid_pos(ht, *pos);
2451 	if (idx >= ht->nNumUsed) {
2452 		ZVAL_NULL(key);
2453 	} else {
2454 		p = ht->arData + idx;
2455 		if (p->key) {
2456 			ZVAL_STR_COPY(key, p->key);
2457 		} else {
2458 			ZVAL_LONG(key, p->h);
2459 		}
2460 	}
2461 }
2462 
2463 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2464 {
2465 	uint32_t idx;
2466 	Bucket *p;
2467 
2468 	IS_CONSISTENT(ht);
2469 	idx = _zend_hash_get_valid_pos(ht, *pos);
2470 	if (idx < ht->nNumUsed) {
2471 		p = ht->arData + idx;
2472 		if (p->key) {
2473 			return HASH_KEY_IS_STRING;
2474 		} else {
2475 			return HASH_KEY_IS_LONG;
2476 		}
2477 	}
2478 	return HASH_KEY_NON_EXISTENT;
2479 }
2480 
2481 
2482 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2483 {
2484 	uint32_t idx;
2485 	Bucket *p;
2486 
2487 	IS_CONSISTENT(ht);
2488 	idx = _zend_hash_get_valid_pos(ht, *pos);
2489 	if (idx < ht->nNumUsed) {
2490 		p = ht->arData + idx;
2491 		return &p->val;
2492 	} else {
2493 		return NULL;
2494 	}
2495 }
2496 
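/* Illustrative sketch (editor's addition, not part of zend_hash.c): manual
 * forward iteration with an external HashPosition, the pattern the *_ex()
 * functions above are designed for.  The example_* name is hypothetical.
 */
static void example_walk(HashTable *ht)
{
	HashPosition pos;
	zval *val;
	zend_string *str_key;
	zend_ulong num_key;

	zend_hash_internal_pointer_reset_ex(ht, &pos);
	while ((val = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
		switch (zend_hash_get_current_key_ex(ht, &str_key, &num_key, &pos)) {
			case HASH_KEY_IS_STRING:
				/* string-keyed entry: str_key, val */
				break;
			case HASH_KEY_IS_LONG:
				/* integer-keyed entry: num_key, val */
				break;
		}
		zend_hash_move_forward_ex(ht, &pos);
	}
}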
2497 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2498 {
2499 	zval val;
2500 	zend_ulong h;
2501 	zend_string *key;
2502 
2503 	val = p->val;
2504 	h = p->h;
2505 	key = p->key;
2506 
2507 	p->val = q->val;
2508 	p->h = q->h;
2509 	p->key = q->key;
2510 
2511 	q->val = val;
2512 	q->h = h;
2513 	q->key = key;
2514 }
2515 
2516 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2517 {
2518 	zval val;
2519 
2520 	val = p->val;
2521 	p->val = q->val;
2522 	q->val = val;
2523 }
2524 
2525 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2526 {
2527 	zval val;
2528 	zend_ulong h;
2529 
2530 	val = p->val;
2531 	h = p->h;
2532 
2533 	p->val = q->val;
2534 	p->h = q->h;
2535 
2536 	q->val = val;
2537 	q->h = h;
2538 }
2539 
2540 ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
2541 {
2542 	Bucket *p;
2543 	uint32_t i, j;
2544 
2545 	IS_CONSISTENT(ht);
2546 	HT_ASSERT_RC1(ht);
2547 
2548 	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
2549 		/* Doesn't require sorting */
2550 		return;
2551 	}
2552 
2553 	if (HT_IS_WITHOUT_HOLES(ht)) {
2554 		/* Store original order of elements in extra space to allow stable sorting. */
2555 		for (i = 0; i < ht->nNumUsed; i++) {
2556 			Z_EXTRA(ht->arData[i].val) = i;
2557 		}
2558 	} else {
2559 		/* Remove holes and store original order. */
2560 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2561 			p = ht->arData + j;
2562 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2563 			if (i != j) {
2564 				ht->arData[i] = *p;
2565 			}
2566 			Z_EXTRA(ht->arData[i].val) = i;
2567 			i++;
2568 		}
2569 		ht->nNumUsed = i;
2570 	}
2571 
2572 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
2573 		/* We broke the hash collision chains by overriding Z_NEXT() with Z_EXTRA().
2574 		 * Reset the hash headers table as well to avoid possible inconsistent
2575 		 * access on recursive data structures.
2576 		 *
2577 		 * See Zend/tests/bug63882_2.phpt
2578 		 */
2579 		HT_HASH_RESET(ht);
2580 	}
2581 
2582 	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
2583 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2584 				((HT_FLAGS(ht) & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2585 
2586 	ht->nInternalPointer = 0;
2587 
2588 	if (renumber) {
2589 		for (j = 0; j < i; j++) {
2590 			p = ht->arData + j;
2591 			p->h = j;
2592 			if (p->key) {
2593 				zend_string_release(p->key);
2594 				p->key = NULL;
2595 			}
2596 		}
2597 
2598 		ht->nNextFreeElement = i;
2599 	}
2600 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2601 		if (!renumber) {
2602 			zend_hash_packed_to_hash(ht);
2603 		}
2604 	} else {
2605 		if (renumber) {
2606 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2607 			Bucket *old_buckets = ht->arData;
2608 
2609 			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
2610 			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2611 			ht->nTableMask = HT_MIN_MASK;
2612 			HT_SET_DATA_ADDR(ht, new_data);
2613 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2614 			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
2615 			HT_HASH_RESET_PACKED(ht);
2616 		} else {
2617 			zend_hash_rehash(ht);
2618 		}
2619 	}
2620 }
2621 
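/* Illustrative sketch (editor's addition, not part of zend_hash.c): sorting a
 * table of IS_LONG values in ascending order.  zend_sort() (zend_sort.h) is
 * the stock sort_func_t; renumber == 1 discards the keys and reindexes the
 * elements 0..n-1, which leaves a packed table as shown above.  The example_*
 * name is hypothetical.
 */
static int example_cmp_long(Bucket *a, Bucket *b)
{
	zend_long l = Z_LVAL(a->val), r = Z_LVAL(b->val);
	return l < r ? -1 : (l > r ? 1 : 0);
}

/* Usage: zend_hash_sort_ex(ht, zend_sort, example_cmp_long, 1); */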
2622 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) {
2623 	uint32_t idx1, idx2;
2624 
2625 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2626 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2627 	}
2628 
2629 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2630 		Bucket *p1 = ht1->arData + idx1, *p2;
2631 		zval *pData1, *pData2;
2632 		int result;
2633 
2634 		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2635 		if (ordered) {
2636 			while (1) {
2637 				ZEND_ASSERT(idx2 != ht2->nNumUsed);
2638 				p2 = ht2->arData + idx2;
2639 				if (Z_TYPE(p2->val) != IS_UNDEF) break;
2640 				idx2++;
2641 			}
2642 			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
2643 				if (p1->h != p2->h) {
2644 					return p1->h > p2->h ? 1 : -1;
2645 				}
2646 			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
2647 				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2648 					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2649 				}
2650 
2651 				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2652 				if (result != 0) {
2653 					return result;
2654 				}
2655 			} else {
2656 				/* Mixed key types: A string key is considered as larger */
2657 				return p1->key != NULL ? 1 : -1;
2658 			}
2659 			pData2 = &p2->val;
2660 			idx2++;
2661 		} else {
2662 			if (p1->key == NULL) { /* numeric index */
2663 				pData2 = zend_hash_index_find(ht2, p1->h);
2664 				if (pData2 == NULL) {
2665 					return 1;
2666 				}
2667 			} else { /* string index */
2668 				pData2 = zend_hash_find(ht2, p1->key);
2669 				if (pData2 == NULL) {
2670 					return 1;
2671 				}
2672 			}
2673 		}
2674 
2675 		pData1 = &p1->val;
2676 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2677 			pData1 = Z_INDIRECT_P(pData1);
2678 		}
2679 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2680 			pData2 = Z_INDIRECT_P(pData2);
2681 		}
2682 
2683 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
2684 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
2685 				return -1;
2686 			}
2687 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2688 			return 1;
2689 		} else {
2690 			result = compar(pData1, pData2);
2691 			if (result != 0) {
2692 				return result;
2693 			}
2694 		}
2695 	}
2696 
2697 	return 0;
2698 }
2699 
2700 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
2701 {
2702 	int result;
2703 	IS_CONSISTENT(ht1);
2704 	IS_CONSISTENT(ht2);
2705 
2706 	if (ht1 == ht2) {
2707 		return 0;
2708 	}
2709 
2710 	/* It's enough to protect only one of the arrays.
2711 	 * The second one may be referenced from the first and this may cause
2712 	 * false recursion detection.
2713 	 */
2714 	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
2715 		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
2716 	}
2717 
2718 	GC_TRY_PROTECT_RECURSION(ht1);
2719 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2720 	GC_TRY_UNPROTECT_RECURSION(ht1);
2721 
2722 	return result;
2723 }
2724 
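/* Illustrative sketch (editor's addition, not part of zend_hash.c): comparing
 * two tables by value.  compare_func_t takes void pointers, so a small wrapper
 * around zend_compare() (zend_operators.h) is used here; with ordered == 0
 * elements are matched by key rather than by position, as in the
 * implementation above.  The example_* name is hypothetical.
 */
static int example_zval_cmp(const void *a, const void *b)
{
	return zend_compare((zval *) a, (zval *) b);
}

/* Usage: int diff = zend_hash_compare(ht1, ht2, example_zval_cmp, 0); */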
2725 
2726 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, bucket_compare_func_t compar, uint32_t flag)
2727 {
2728 	uint32_t idx;
2729 	Bucket *p, *res;
2730 
2731 	IS_CONSISTENT(ht);
2732 
2733 	if (ht->nNumOfElements == 0 ) {
2734 		return NULL;
2735 	}
2736 
2737 	idx = 0;
2738 	while (1) {
2739 		if (idx == ht->nNumUsed) {
2740 			return NULL;
2741 		}
2742 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2743 		idx++;
2744 	}
2745 	res = ht->arData + idx;
2746 	for (; idx < ht->nNumUsed; idx++) {
2747 		p = ht->arData + idx;
2748 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2749 
2750 		if (flag) {
2751 			if (compar(res, p) < 0) { /* max */
2752 				res = p;
2753 			}
2754 		} else {
2755 			if (compar(res, p) > 0) { /* min */
2756 				res = p;
2757 			}
2758 		}
2759 	}
2760 	return &res->val;
2761 }
2762 
2763 ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2764 {
2765 	const char *tmp = key;
2766 
2767 	const char *end = key + length;
2768 
2769 	if (*tmp == '-') {
2770 		tmp++;
2771 	}
2772 
2773 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
2774 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
2775 	 || (SIZEOF_ZEND_LONG == 4 &&
2776 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2777 	     *tmp > '2')) { /* overflow */
2778 		return 0;
2779 	}
2780 	*idx = (*tmp - '0');
2781 	while (1) {
2782 		++tmp;
2783 		if (tmp == end) {
2784 			if (*key == '-') {
2785 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
2786 					return 0;
2787 				}
2788 				*idx = 0 - *idx;
2789 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
2790 				return 0;
2791 			}
2792 			return 1;
2793 		}
2794 		if (*tmp <= '9' && *tmp >= '0') {
2795 			*idx = (*idx * 10) + (*tmp - '0');
2796 		} else {
2797 			return 0;
2798 		}
2799 	}
2800 }
2801 
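/* Illustrative sketch (editor's addition, not part of zend_hash.c): which
 * string keys the function above accepts as decimal integers.  Keys with a
 * leading zero, non-digit characters or values outside the zend_long range
 * are rejected and stay string keys, so "0123" remains a string while "123"
 * becomes the integer 123.  The example_* name is hypothetical.
 */
static void example_numeric_keys(void)
{
	zend_ulong idx;

	ZEND_ASSERT(_zend_handle_numeric_str_ex("123", 3, &idx) && idx == 123);
	ZEND_ASSERT(_zend_handle_numeric_str_ex("-7", 2, &idx) && idx == (zend_ulong) -7);
	ZEND_ASSERT(!_zend_handle_numeric_str_ex("0123", 4, &idx)); /* leading zero */
	ZEND_ASSERT(!_zend_handle_numeric_str_ex("12a", 3, &idx));  /* not a number */
}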
2802 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
2803  * and converts it to a "proptable" (contains only string keys).
2804  * If the symtable didn't need duplicating, its refcount is incremented.
2805  */
2806 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
2807 {
2808 	zend_ulong num_key;
2809 	zend_string *str_key;
2810 	zval *zv;
2811 
2812 	if (UNEXPECTED(HT_IS_PACKED(ht))) {
2813 		goto convert;
2814 	}
2815 
2816 	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
2817 		if (!str_key) {
2818 			goto convert;
2819 		}
2820 	} ZEND_HASH_FOREACH_END();
2821 
2822 	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
2823 		GC_ADDREF(ht);
2824 	}
2825 
2826 	return ht;
2827 
2828 convert:
2829 	{
2830 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
2831 
2832 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2833 			if (!str_key) {
2834 				str_key = zend_long_to_str(num_key);
2835 				zend_string_delref(str_key);
2836 			}
2837 			do {
2838 				if (Z_OPT_REFCOUNTED_P(zv)) {
2839 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2840 						zv = Z_REFVAL_P(zv);
2841 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2842 							break;
2843 						}
2844 					}
2845 					Z_ADDREF_P(zv);
2846 				}
2847 			} while (0);
2848 			zend_hash_update(new_ht, str_key, zv);
2849 		} ZEND_HASH_FOREACH_END();
2850 
2851 		return new_ht;
2852 	}
2853 }
2854 
2855 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
2856  * a "symtable" (contains integer and non-numeric string keys).
2857  * If the proptable didn't need duplicating, its refcount is incremented.
2858  */
2859 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
2860 {
2861 	zend_ulong num_key;
2862 	zend_string *str_key;
2863 	zval *zv;
2864 
2865 	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
2866 		/* The `str_key &&` here might seem redundant: property tables should
2867 		 * only have string keys. Unfortunately, this isn't true, at the very
2868 		 * least because of ArrayObject, which stores a symtable where the
2869 		 * property table should be.
2870 		 */
2871 		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2872 			goto convert;
2873 		}
2874 	} ZEND_HASH_FOREACH_END();
2875 
2876 	if (always_duplicate) {
2877 		return zend_array_dup(ht);
2878 	}
2879 
2880 	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
2881 		GC_ADDREF(ht);
2882 	}
2883 
2884 	return ht;
2885 
2886 convert:
2887 	{
2888 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
2889 
2890 		ZEND_HASH_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
2891 			do {
2892 				if (Z_OPT_REFCOUNTED_P(zv)) {
2893 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2894 						zv = Z_REFVAL_P(zv);
2895 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2896 							break;
2897 						}
2898 					}
2899 					Z_ADDREF_P(zv);
2900 				}
2901 			} while (0);
2902 			/* Again, thank ArrayObject for `!str_key ||`. */
2903 			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2904 				zend_hash_index_update(new_ht, num_key, zv);
2905 			} else {
2906 				zend_hash_update(new_ht, str_key, zv);
2907 			}
2908 		} ZEND_HASH_FOREACH_END();
2909 
2910 		return new_ht;
2911 	}
2912 }
2913