xref: /PHP-8.0/Zend/zend_hash.c (revision 2c508c4d)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #if defined(__aarch64__)
26 # include <arm_neon.h>
27 #endif
28 
29 #ifdef __SSE2__
30 # include <mmintrin.h>
31 # include <emmintrin.h>
32 #endif
33 
34 #if ZEND_DEBUG
35 # define HT_ASSERT(ht, expr) \
36 	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
37 #else
38 # define HT_ASSERT(ht, expr)
39 #endif
40 
41 #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
42 
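/* Sentinel stored into HashTableIterator.ht by _zend_hash_iterators_remove() when
 * the table an iterator was attached to is destroyed; zend_hash_iterator_del() and
 * the repositioning helpers below test for it so they never touch a freed table. */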
43 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
44 
45 #if ZEND_DEBUG
46 
47 #define HT_OK					0x00
48 #define HT_IS_DESTROYING		0x01
49 #define HT_DESTROYED			0x02
50 #define HT_CLEANING				0x03
51 
52 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
53 {
54 	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
55 		return;
56 	}
57 	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
58 		case HT_IS_DESTROYING:
59 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
60 			break;
61 		case HT_DESTROYED:
62 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
63 			break;
64 		case HT_CLEANING:
65 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
66 			break;
67 		default:
68 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
69 			break;
70 	}
71 	ZEND_UNREACHABLE();
72 }
73 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
74 #define SET_INCONSISTENT(n) do { \
75 		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
76 	} while (0)
77 #else
78 #define IS_CONSISTENT(a)
79 #define SET_INCONSISTENT(n)
80 #endif
81 
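/* nNumUsed also counts IS_UNDEF tombstones, so reaching nTableSize does not always
 * mean the table grows: zend_hash_do_resize() may just compact the holes away. */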
82 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
83 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
84 		zend_hash_do_resize(ht);					\
85 	}
86 
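/* Case-insensitive pointer lookups, used for identifiers that PHP treats as
 * case-insensitive (function/class tables are keyed by the lowercased name).
 * A minimal usage sketch, assuming a function table keyed that way:
 *
 *     zend_function *fn = zend_hash_str_find_ptr_lc(
 *         CG(function_table), "strLen", sizeof("strLen") - 1);
 *
 * The lowercased copy of the probe string is stack-allocated for short inputs
 * and released before returning. */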
87 ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
88 	void *result;
89 	char *lc_str;
90 
91 	/* Stack allocate small strings to improve performance */
92 	ALLOCA_FLAG(use_heap)
93 
94 	lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
95 	result = zend_hash_str_find_ptr(ht, lc_str, len);
96 	free_alloca(lc_str, use_heap);
97 
98 	return result;
99 }
100 
101 ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
102 	void *result;
103 	zend_string *lc_key = zend_string_tolower(key);
104 	result = zend_hash_find_ptr(ht, lc_key);
105 	zend_string_release(lc_key);
106 	return result;
107 }
108 
109 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
110 
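/* Rounds a requested size up to the allocation size actually used: requests at or
 * below HT_MIN_SIZE stay at HT_MIN_SIZE, requests at or above HT_MAX_SIZE abort,
 * and everything else becomes the next power of two, e.g. nSize = 100 gives
 * __builtin_clz(99) = 25, 25 ^ 0x1f = 6, 0x2u << 6 = 128. */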
111 static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
112 {
113 #ifdef ZEND_WIN32
114 	unsigned long index;
115 #endif
116 
117 	/* Use a big enough power of 2 */
118 	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
119 	if (nSize <= HT_MIN_SIZE) {
120 		return HT_MIN_SIZE;
121 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
122 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
123 	}
124 
125 #ifdef ZEND_WIN32
126 	if (BitScanReverse(&index, nSize - 1)) {
127 		return 0x2u << ((31 - index) ^ 0x1f);
128 	} else {
129 		/* nSize is ensured to be in the valid range, fall back to it
130 		   rather than using an undefined bit scan result. */
131 		return nSize;
132 	}
133 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
134 	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
135 #else
136 	nSize -= 1;
137 	nSize |= (nSize >> 1);
138 	nSize |= (nSize >> 2);
139 	nSize |= (nSize >> 4);
140 	nSize |= (nSize >> 8);
141 	nSize |= (nSize >> 16);
142 	return nSize + 1;
143 #endif
144 }
145 
146 static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
147 {
148 	void *data;
149 
150 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
151 		data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
152 	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
153 		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
154 	} else {
155 		data = emalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
156 	}
157 	HT_SET_DATA_ADDR(ht, data);
158 	/* Don't overwrite iterator count. */
159 	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
160 	HT_HASH_RESET_PACKED(ht);
161 }
162 
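/* For the common HT_MIN_SIZE table the hash part is a fixed 16 slots, all set to
 * HT_INVALID_IDX ((uint32_t)-1): one all-ones SSE2/NEON register covers four slots
 * per store, and the scalar fallback writes the same 16 values one by one. */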
163 static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
164 {
165 	void *data;
166 	uint32_t nSize = ht->nTableSize;
167 
168 	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
169 		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
170 	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
171 		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
172 		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
173 		HT_SET_DATA_ADDR(ht, data);
174 		/* Don't overwrite iterator count. */
175 		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
176 #ifdef __SSE2__
177 		do {
178 			__m128i xmm0 = _mm_setzero_si128();
179 			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
180 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  0), xmm0);
181 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  4), xmm0);
182 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  8), xmm0);
183 			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
184 		} while (0);
185 #elif defined(__aarch64__)
186 		do {
187 			int32x4_t t = vdupq_n_s32(-1);
188 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  0), t);
189 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  4), t);
190 			vst1q_s32((int32_t*)&HT_HASH_EX(data,  8), t);
191 			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
192 		} while (0);
193 #else
194 		HT_HASH_EX(data,  0) = -1;
195 		HT_HASH_EX(data,  1) = -1;
196 		HT_HASH_EX(data,  2) = -1;
197 		HT_HASH_EX(data,  3) = -1;
198 		HT_HASH_EX(data,  4) = -1;
199 		HT_HASH_EX(data,  5) = -1;
200 		HT_HASH_EX(data,  6) = -1;
201 		HT_HASH_EX(data,  7) = -1;
202 		HT_HASH_EX(data,  8) = -1;
203 		HT_HASH_EX(data,  9) = -1;
204 		HT_HASH_EX(data, 10) = -1;
205 		HT_HASH_EX(data, 11) = -1;
206 		HT_HASH_EX(data, 12) = -1;
207 		HT_HASH_EX(data, 13) = -1;
208 		HT_HASH_EX(data, 14) = -1;
209 		HT_HASH_EX(data, 15) = -1;
210 #endif
211 		return;
212 	} else {
213 		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
214 	}
215 	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
216 	HT_SET_DATA_ADDR(ht, data);
217 	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
218 	HT_HASH_RESET(ht);
219 }
220 
221 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
222 {
223 	HT_ASSERT_RC1(ht);
224 	ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
225 	if (packed) {
226 		zend_hash_real_init_packed_ex(ht);
227 	} else {
228 		zend_hash_real_init_mixed_ex(ht);
229 	}
230 }
231 
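/* Shared 2-slot dummy hash used by every table that has not allocated real storage
 * yet (see _zend_hash_init_int()): both slots hold HT_INVALID_IDX, so lookups on an
 * empty table simply miss without any special casing.  zend_empty_array reuses it as
 * the canonical immutable empty array (refcount 2, GC_IMMUTABLE) that the engine
 * shares instead of allocating a new empty HashTable. */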
232 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
233 	{HT_INVALID_IDX, HT_INVALID_IDX};
234 
235 ZEND_API const HashTable zend_empty_array = {
236 	.gc.refcount = 2,
237 	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
238 	.u.flags = HASH_FLAG_UNINITIALIZED,
239 	.nTableMask = HT_MIN_MASK,
240 	.arData = (Bucket*)&uninitialized_bucket[2],
241 	.nNumUsed = 0,
242 	.nNumOfElements = 0,
243 	.nTableSize = HT_MIN_SIZE,
244 	.nInternalPointer = 0,
245 	.nNextFreeElement = 0,
246 	.pDestructor = ZVAL_PTR_DTOR
247 };
248 
249 static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
250 {
251 	GC_SET_REFCOUNT(ht, 1);
252 	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
253 	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
254 	ht->nTableMask = HT_MIN_MASK;
255 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
256 	ht->nNumUsed = 0;
257 	ht->nNumOfElements = 0;
258 	ht->nInternalPointer = 0;
259 	ht->nNextFreeElement = ZEND_LONG_MIN;
260 	ht->pDestructor = pDestructor;
261 	ht->nTableSize = zend_hash_check_size(nSize);
262 }
263 
264 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
265 {
266 	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
267 }
268 
269 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
270 {
271 	HashTable *ht = emalloc(sizeof(HashTable));
272 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
273 	return ht;
274 }
275 
276 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
277 {
278 	HashTable *ht = emalloc(sizeof(HashTable));
279 	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
280 	return ht;
281 }
282 
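/* Builds a packed two-element array [0 => *val1, 1 => *val2].  The zvals are copied
 * by value (no refcount change), so ownership of both values passes to the new
 * array; this is the fast path used where the engine needs a small [a, b] pair. */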
283 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
284 {
285 	Bucket *p;
286 	HashTable *ht = emalloc(sizeof(HashTable));
287 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
288 	ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
289 	zend_hash_real_init_packed_ex(ht);
290 
291 	p = ht->arData;
292 	ZVAL_COPY_VALUE(&p->val, val1);
293 	p->h = 0;
294 	p->key = NULL;
295 
296 	p++;
297 	ZVAL_COPY_VALUE(&p->val, val2);
298 	p->h = 1;
299 	p->key = NULL;
300 	return ht;
301 }
302 
303 static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
304 {
305 	HT_ASSERT_RC1(ht);
306 	if (ht->nTableSize >= HT_MAX_SIZE) {
307 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
308 	}
309 	ht->nTableSize += ht->nTableSize;
310 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
311 }
312 
313 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
314 {
315 	IS_CONSISTENT(ht);
316 
317 	HT_ASSERT_RC1(ht);
318 	zend_hash_real_init_ex(ht, packed);
319 }
320 
321 ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
322 {
323 	IS_CONSISTENT(ht);
324 
325 	HT_ASSERT_RC1(ht);
326 	zend_hash_real_init_packed_ex(ht);
327 }
328 
329 ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
330 {
331 	IS_CONSISTENT(ht);
332 
333 	HT_ASSERT_RC1(ht);
334 	zend_hash_real_init_mixed_ex(ht);
335 }
336 
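/* Conversion between the two layouts: a packed table stores each value at its
 * integer offset in arData and keeps only the 2-slot dummy hash (HT_MIN_MASK),
 * while a regular table carries a real hash of collision chains.  Converting to a
 * hash reallocates, copies the buckets verbatim and rebuilds the chains with
 * zend_hash_rehash(); converting back just drops the chains. */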
337 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
338 {
339 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
340 	Bucket *old_buckets = ht->arData;
341 	uint32_t nSize = ht->nTableSize;
342 
343 	HT_ASSERT_RC1(ht);
344 	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
345 	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
346 	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
347 	HT_SET_DATA_ADDR(ht, new_data);
348 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
349 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
350 	zend_hash_rehash(ht);
351 }
352 
353 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
354 {
355 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
356 	Bucket *old_buckets = ht->arData;
357 
358 	HT_ASSERT_RC1(ht);
359 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
360 	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
361 	ht->nTableMask = HT_MIN_MASK;
362 	HT_SET_DATA_ADDR(ht, new_data);
363 	HT_HASH_RESET_PACKED(ht);
364 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
365 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
366 }
367 
368 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
369 {
370 	HT_ASSERT_RC1(ht);
371 	if (nSize == 0) return;
372 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
373 		if (nSize > ht->nTableSize) {
374 			ht->nTableSize = zend_hash_check_size(nSize);
375 		}
376 		zend_hash_real_init(ht, packed);
377 	} else {
378 		if (packed) {
379 			ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
380 			if (nSize > ht->nTableSize) {
381 				ht->nTableSize = zend_hash_check_size(nSize);
382 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
383 			}
384 		} else {
385 			ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
386 			if (nSize > ht->nTableSize) {
387 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
388 				Bucket *old_buckets = ht->arData;
389 				nSize = zend_hash_check_size(nSize);
390 				ht->nTableSize = nSize;
391 				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
392 				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
393 				HT_SET_DATA_ADDR(ht, new_data);
394 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
395 				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
396 				zend_hash_rehash(ht);
397 			}
398 		}
399 	}
400 }
401 
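/* Rolls the table back to its first nNumUsed entries.  Later buckets are unlinked
 * from their collision chains in reverse order, which is safe because chains always
 * link from higher to lower bucket indexes; note that no destructors are run and no
 * keys are released here. */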
402 ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
403 {
404 	Bucket *p, *end, *arData;
405 	uint32_t nIndex;
406 
407 	arData = ht->arData;
408 	p = arData + ht->nNumUsed;
409 	end = arData + nNumUsed;
410 	ht->nNumUsed = nNumUsed;
411 	while (p != end) {
412 		p--;
413 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
414 		ht->nNumOfElements--;
415 		/* Collision pointers are always directed from higher to lower buckets */
416 #if 0
417 		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
418 			abort();
419 		}
420 #endif
421 		nIndex = p->h | ht->nTableMask;
422 		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
423 	}
424 }
425 
426 static uint32_t zend_array_recalc_elements(HashTable *ht)
427 {
428 	zval *val;
429 	uint32_t num = ht->nNumOfElements;
430 
431 	ZEND_HASH_FOREACH_VAL(ht, val) {
432 		if (Z_TYPE_P(val) == IS_INDIRECT) {
433 			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
434 				num--;
435 			}
436 		}
437 	} ZEND_HASH_FOREACH_END();
438 	return num;
439 }
440 /* }}} */
441 
442 ZEND_API uint32_t zend_array_count(HashTable *ht)
443 {
444 	uint32_t num;
445 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
446 		num = zend_array_recalc_elements(ht);
447 		if (UNEXPECTED(ht->nNumOfElements == num)) {
448 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
449 		}
450 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
451 		num = zend_array_recalc_elements(ht);
452 	} else {
453 		num = zend_hash_num_elements(ht);
454 	}
455 	return num;
456 }
457 /* }}} */
458 
459 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
460 {
461 	while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
462 		pos++;
463 	}
464 	return pos;
465 }
466 
467 static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
468 {
469 	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
470 }
471 
472 ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
473 {
474 	return _zend_hash_get_current_pos(ht);
475 }
476 
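/* User-visible position iterators (e.g. foreach over a shared array) are kept in
 * the global EG(ht_iterators) registry rather than in the table itself.  Adding one
 * reuses the first free slot, or grows the registry by 8 slots at a time once the
 * initial EG(ht_iterators_slots) array is exhausted; each table only keeps a small
 * iterator count so most operations can skip the registry entirely. */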
477 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
478 {
479 	HashTableIterator *iter = EG(ht_iterators);
480 	HashTableIterator *end  = iter + EG(ht_iterators_count);
481 	uint32_t idx;
482 
483 	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
484 		HT_INC_ITERATORS_COUNT(ht);
485 	}
486 	while (iter != end) {
487 		if (iter->ht == NULL) {
488 			iter->ht = ht;
489 			iter->pos = pos;
490 			idx = iter - EG(ht_iterators);
491 			if (idx + 1 > EG(ht_iterators_used)) {
492 				EG(ht_iterators_used) = idx + 1;
493 			}
494 			return idx;
495 		}
496 		iter++;
497 	}
498 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
499 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
500 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
501 	} else {
502 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
503 	}
504 	iter = EG(ht_iterators) + EG(ht_iterators_count);
505 	EG(ht_iterators_count) += 8;
506 	iter->ht = ht;
507 	iter->pos = pos;
508 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
509 	idx = iter - EG(ht_iterators);
510 	EG(ht_iterators_used) = idx + 1;
511 	return idx;
512 }
513 
514 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
515 {
516 	HashTableIterator *iter = EG(ht_iterators) + idx;
517 
518 	ZEND_ASSERT(idx != (uint32_t)-1);
519 	if (UNEXPECTED(iter->ht != ht)) {
520 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
521 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
522 			HT_DEC_ITERATORS_COUNT(iter->ht);
523 		}
524 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
525 			HT_INC_ITERATORS_COUNT(ht);
526 		}
527 		iter->ht = ht;
528 		iter->pos = _zend_hash_get_current_pos(ht);
529 	}
530 	return iter->pos;
531 }
532 
533 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
534 {
535 	HashTable *ht = Z_ARRVAL_P(array);
536 	HashTableIterator *iter = EG(ht_iterators) + idx;
537 
538 	ZEND_ASSERT(idx != (uint32_t)-1);
539 	if (UNEXPECTED(iter->ht != ht)) {
540 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
541 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
542 			HT_DEC_ITERATORS_COUNT(iter->ht);
543 		}
544 		SEPARATE_ARRAY(array);
545 		ht = Z_ARRVAL_P(array);
546 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
547 			HT_INC_ITERATORS_COUNT(ht);
548 		}
549 		iter->ht = ht;
550 		iter->pos = _zend_hash_get_current_pos(ht);
551 	}
552 	return iter->pos;
553 }
554 
555 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
556 {
557 	HashTableIterator *iter = EG(ht_iterators) + idx;
558 
559 	ZEND_ASSERT(idx != (uint32_t)-1);
560 
561 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
562 			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
563 		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
564 		HT_DEC_ITERATORS_COUNT(iter->ht);
565 	}
566 	iter->ht = NULL;
567 
568 	if (idx == EG(ht_iterators_used) - 1) {
569 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
570 			idx--;
571 		}
572 		EG(ht_iterators_used) = idx;
573 	}
574 }
575 
576 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
577 {
578 	HashTableIterator *iter = EG(ht_iterators);
579 	HashTableIterator *end  = iter + EG(ht_iterators_used);
580 
581 	while (iter != end) {
582 		if (iter->ht == ht) {
583 			iter->ht = HT_POISONED_PTR;
584 		}
585 		iter++;
586 	}
587 }
588 
589 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
590 {
591 	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
592 		_zend_hash_iterators_remove(ht);
593 	}
594 }
595 
596 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
597 {
598 	HashTableIterator *iter = EG(ht_iterators);
599 	HashTableIterator *end  = iter + EG(ht_iterators_used);
600 	HashPosition res = ht->nNumUsed;
601 
602 	while (iter != end) {
603 		if (iter->ht == ht) {
604 			if (iter->pos >= start && iter->pos < res) {
605 				res = iter->pos;
606 			}
607 		}
608 		iter++;
609 	}
610 	return res;
611 }
612 
613 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
614 {
615 	HashTableIterator *iter = EG(ht_iterators);
616 	HashTableIterator *end  = iter + EG(ht_iterators_used);
617 
618 	while (iter != end) {
619 		if (iter->ht == ht && iter->pos == from) {
620 			iter->pos = to;
621 		}
622 		iter++;
623 	}
624 }
625 
626 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
627 {
628 	HashTableIterator *iter = EG(ht_iterators);
629 	HashTableIterator *end  = iter + EG(ht_iterators_used);
630 
631 	while (iter != end) {
632 		if (iter->ht == ht) {
633 			iter->pos += step;
634 		}
635 		iter++;
636 	}
637 }
638 
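/* Lookup helpers.  nIndex = h | nTableMask picks a hash slot (the mask is a
 * negative value, so slots are effectively addressed backwards from arData), and
 * the slot holds the head of a collision chain linked through Z_NEXT(bucket->val).
 * Interned keys are first compared by pointer; only on a miss do we compare the
 * stored hash and then the string contents. */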
639 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, zend_bool known_hash)
640 {
641 	zend_ulong h;
642 	uint32_t nIndex;
643 	uint32_t idx;
644 	Bucket *p, *arData;
645 
646 	if (known_hash) {
647 		h = ZSTR_H(key);
648 	} else {
649 		h = zend_string_hash_val(key);
650 	}
651 	arData = ht->arData;
652 	nIndex = h | ht->nTableMask;
653 	idx = HT_HASH_EX(arData, nIndex);
654 
655 	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
656 		return NULL;
657 	}
658 	p = HT_HASH_TO_BUCKET_EX(arData, idx);
659 	if (EXPECTED(p->key == key)) { /* check for the same interned string */
660 		return p;
661 	}
662 
663 	while (1) {
664 		if (p->h == ZSTR_H(key) &&
665 		    EXPECTED(p->key) &&
666 		    zend_string_equal_content(p->key, key)) {
667 			return p;
668 		}
669 		idx = Z_NEXT(p->val);
670 		if (idx == HT_INVALID_IDX) {
671 			return NULL;
672 		}
673 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
674 		if (p->key == key) { /* check for the same interned string */
675 			return p;
676 		}
677 	}
678 }
679 
680 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
681 {
682 	uint32_t nIndex;
683 	uint32_t idx;
684 	Bucket *p, *arData;
685 
686 	arData = ht->arData;
687 	nIndex = h | ht->nTableMask;
688 	idx = HT_HASH_EX(arData, nIndex);
689 	while (idx != HT_INVALID_IDX) {
690 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
691 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
692 		if ((p->h == h)
693 			 && p->key
694 			 && (ZSTR_LEN(p->key) == len)
695 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
696 			return p;
697 		}
698 		idx = Z_NEXT(p->val);
699 	}
700 	return NULL;
701 }
702 
703 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
704 {
705 	uint32_t nIndex;
706 	uint32_t idx;
707 	Bucket *p, *arData;
708 
709 	arData = ht->arData;
710 	nIndex = h | ht->nTableMask;
711 	idx = HT_HASH_EX(arData, nIndex);
712 	while (idx != HT_INVALID_IDX) {
713 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
714 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
715 		if (p->h == h && !p->key) {
716 			return p;
717 		}
718 		idx = Z_NEXT(p->val);
719 	}
720 	return NULL;
721 }
722 
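/* Common insert/update path for string keys.  flag selects the behaviour: HASH_ADD
 * returns NULL if the key already exists, HASH_UPDATE overwrites it (running the
 * destructor on the old value), HASH_ADD_NEW promises the key is absent and skips
 * the lookup (still verified in debug builds), and HASH_UPDATE_INDIRECT writes
 * through IS_INDIRECT slots, as used e.g. by symbol tables. */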
723 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
724 {
725 	zend_ulong h;
726 	uint32_t nIndex;
727 	uint32_t idx;
728 	Bucket *p, *arData;
729 
730 	IS_CONSISTENT(ht);
731 	HT_ASSERT_RC1(ht);
732 
733 	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
734 		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
735 			zend_hash_real_init_mixed(ht);
736 			if (!ZSTR_IS_INTERNED(key)) {
737 				zend_string_addref(key);
738 				HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
739 				zend_string_hash_val(key);
740 			}
741 			goto add_to_hash;
742 		} else {
743 			zend_hash_packed_to_hash(ht);
744 			if (!ZSTR_IS_INTERNED(key)) {
745 				zend_string_addref(key);
746 				HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
747 				zend_string_hash_val(key);
748 			}
749 		}
750 	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
751 		p = zend_hash_find_bucket(ht, key, 0);
752 
753 		if (p) {
754 			zval *data;
755 
756 			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
757 			if (flag & HASH_ADD) {
758 				if (!(flag & HASH_UPDATE_INDIRECT)) {
759 					return NULL;
760 				}
761 				ZEND_ASSERT(&p->val != pData);
762 				data = &p->val;
763 				if (Z_TYPE_P(data) == IS_INDIRECT) {
764 					data = Z_INDIRECT_P(data);
765 					if (Z_TYPE_P(data) != IS_UNDEF) {
766 						return NULL;
767 					}
768 				} else {
769 					return NULL;
770 				}
771 			} else {
772 				ZEND_ASSERT(&p->val != pData);
773 				data = &p->val;
774 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
775 					data = Z_INDIRECT_P(data);
776 				}
777 			}
778 			if (ht->pDestructor) {
779 				ht->pDestructor(data);
780 			}
781 			ZVAL_COPY_VALUE(data, pData);
782 			return data;
783 		}
784 		if (!ZSTR_IS_INTERNED(key)) {
785 			zend_string_addref(key);
786 			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
787 		}
788 	} else if (!ZSTR_IS_INTERNED(key)) {
789 		zend_string_addref(key);
790 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
791 		zend_string_hash_val(key);
792 	}
793 
794 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
795 
796 add_to_hash:
797 	idx = ht->nNumUsed++;
798 	ht->nNumOfElements++;
799 	arData = ht->arData;
800 	p = arData + idx;
801 	p->key = key;
802 	p->h = h = ZSTR_H(key);
803 	nIndex = h | ht->nTableMask;
804 	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
805 	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
806 	ZVAL_COPY_VALUE(&p->val, pData);
807 
808 	return &p->val;
809 }
810 
811 static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
812 {
813 	zend_string *key;
814 	uint32_t nIndex;
815 	uint32_t idx;
816 	Bucket *p;
817 
818 	IS_CONSISTENT(ht);
819 	HT_ASSERT_RC1(ht);
820 
821 	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
822 		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
823 			zend_hash_real_init_mixed(ht);
824 			goto add_to_hash;
825 		} else {
826 			zend_hash_packed_to_hash(ht);
827 		}
828 	} else if ((flag & HASH_ADD_NEW) == 0) {
829 		p = zend_hash_str_find_bucket(ht, str, len, h);
830 
831 		if (p) {
832 			zval *data;
833 
834 			if (flag & HASH_ADD) {
835 				if (!(flag & HASH_UPDATE_INDIRECT)) {
836 					return NULL;
837 				}
838 				ZEND_ASSERT(&p->val != pData);
839 				data = &p->val;
840 				if (Z_TYPE_P(data) == IS_INDIRECT) {
841 					data = Z_INDIRECT_P(data);
842 					if (Z_TYPE_P(data) != IS_UNDEF) {
843 						return NULL;
844 					}
845 				} else {
846 					return NULL;
847 				}
848 			} else {
849 				ZEND_ASSERT(&p->val != pData);
850 				data = &p->val;
851 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
852 					data = Z_INDIRECT_P(data);
853 				}
854 			}
855 			if (ht->pDestructor) {
856 				ht->pDestructor(data);
857 			}
858 			ZVAL_COPY_VALUE(data, pData);
859 			return data;
860 		}
861 	}
862 
863 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
864 
865 add_to_hash:
866 	idx = ht->nNumUsed++;
867 	ht->nNumOfElements++;
868 	p = ht->arData + idx;
869 	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
870 	p->h = ZSTR_H(key) = h;
871 	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
872 	ZVAL_COPY_VALUE(&p->val, pData);
873 	nIndex = h | ht->nTableMask;
874 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
875 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
876 
877 	return &p->val;
878 }
879 
880 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
881 {
882 	if (flag == HASH_ADD) {
883 		return zend_hash_add(ht, key, pData);
884 	} else if (flag == HASH_ADD_NEW) {
885 		return zend_hash_add_new(ht, key, pData);
886 	} else if (flag == HASH_UPDATE) {
887 		return zend_hash_update(ht, key, pData);
888 	} else {
889 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
890 		return zend_hash_update_ind(ht, key, pData);
891 	}
892 }
893 
894 ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
895 {
896 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
897 }
898 
899 ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
900 {
901 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
902 }
903 
904 ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
905 {
906 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
907 }
908 
909 ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
910 {
911 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
912 }
913 
914 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
915 {
916 	if (flag == HASH_ADD) {
917 		return zend_hash_str_add(ht, str, len, pData);
918 	} else if (flag == HASH_ADD_NEW) {
919 		return zend_hash_str_add_new(ht, str, len, pData);
920 	} else if (flag == HASH_UPDATE) {
921 		return zend_hash_str_update(ht, str, len, pData);
922 	} else {
923 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
924 		return zend_hash_str_update_ind(ht, str, len, pData);
925 	}
926 }
927 
928 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
929 {
930 	zend_ulong h = zend_hash_func(str, len);
931 
932 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
933 }
934 
935 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
936 {
937 	zend_ulong h = zend_hash_func(str, len);
938 
939 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
940 }
941 
942 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
943 {
944 	zend_ulong h = zend_hash_func(str, len);
945 
946 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
947 }
948 
949 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
950 {
951 	zend_ulong h = zend_hash_func(str, len);
952 
953 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
954 }
955 
956 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
957 {
958 	zval dummy;
959 
960 	ZVAL_NULL(&dummy);
961 	return zend_hash_index_add(ht, h, &dummy);
962 }
963 
964 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
965 {
966 	zval dummy;
967 
968 	ZVAL_NULL(&dummy);
969 	return zend_hash_add(ht, key, &dummy);
970 }
971 
972 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
973 {
974 	zval dummy;
975 
976 	ZVAL_NULL(&dummy);
977 	return zend_hash_str_add(ht, str, len, &dummy);
978 }
979 
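/* Integer-key insert/update.  Packed arrays handle the common cases inline:
 * overwriting an existing offset, appending at (or shortly past) the end while
 * filling any gap with IS_UNDEF buckets, or doubling the packed area as long as it
 * stays at least half full.  Inserts that would leave the packed area too sparse
 * convert the table to a regular hash via zend_hash_packed_to_hash() first. */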
980 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
981 {
982 	uint32_t nIndex;
983 	uint32_t idx;
984 	Bucket *p;
985 
986 	IS_CONSISTENT(ht);
987 	HT_ASSERT_RC1(ht);
988 
989 	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
990 		h = 0;
991 	}
992 
993 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
994 		if (h < ht->nNumUsed) {
995 			p = ht->arData + h;
996 			if (Z_TYPE(p->val) != IS_UNDEF) {
997 replace:
998 				if (flag & HASH_ADD) {
999 					return NULL;
1000 				}
1001 				if (ht->pDestructor) {
1002 					ht->pDestructor(&p->val);
1003 				}
1004 				ZVAL_COPY_VALUE(&p->val, pData);
1005 				return &p->val;
1006 			} else { /* we have to keep the order :( */
1007 				goto convert_to_hash;
1008 			}
1009 		} else if (EXPECTED(h < ht->nTableSize)) {
1010 add_to_packed:
1011 			p = ht->arData + h;
1012 			/* incremental initialization of empty Buckets */
1013 			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
1014 				if (h > ht->nNumUsed) {
1015 					Bucket *q = ht->arData + ht->nNumUsed;
1016 					while (q != p) {
1017 						ZVAL_UNDEF(&q->val);
1018 						q++;
1019 					}
1020 				}
1021 			}
1022 			ht->nNextFreeElement = ht->nNumUsed = h + 1;
1023 			goto add;
1024 		} else if ((h >> 1) < ht->nTableSize &&
1025 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
1026 			zend_hash_packed_grow(ht);
1027 			goto add_to_packed;
1028 		} else {
1029 			if (ht->nNumUsed >= ht->nTableSize) {
1030 				ht->nTableSize += ht->nTableSize;
1031 			}
1032 convert_to_hash:
1033 			zend_hash_packed_to_hash(ht);
1034 		}
1035 	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
1036 		if (h < ht->nTableSize) {
1037 			zend_hash_real_init_packed_ex(ht);
1038 			goto add_to_packed;
1039 		}
1040 		zend_hash_real_init_mixed(ht);
1041 	} else {
1042 		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
1043 			p = zend_hash_index_find_bucket(ht, h);
1044 			if (p) {
1045 				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
1046 				goto replace;
1047 			}
1048 		}
1049 		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
1050 	}
1051 
1052 	idx = ht->nNumUsed++;
1053 	nIndex = h | ht->nTableMask;
1054 	p = ht->arData + idx;
1055 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1056 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
1057 	if ((zend_long)h >= ht->nNextFreeElement) {
1058 		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
1059 	}
1060 add:
1061 	ht->nNumOfElements++;
1062 	p->h = h;
1063 	p->key = NULL;
1064 	ZVAL_COPY_VALUE(&p->val, pData);
1065 
1066 	return &p->val;
1067 }
1068 
1069 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1070 {
1071 	if (flag == HASH_ADD) {
1072 		return zend_hash_index_add(ht, h, pData);
1073 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1074 		return zend_hash_index_add_new(ht, h, pData);
1075 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1076 		ZEND_ASSERT(h == ht->nNextFreeElement);
1077 		return zend_hash_next_index_insert(ht, pData);
1078 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1079 		ZEND_ASSERT(h == ht->nNextFreeElement);
1080 		return zend_hash_next_index_insert_new(ht, pData);
1081 	} else {
1082 		ZEND_ASSERT(flag == HASH_UPDATE);
1083 		return zend_hash_index_update(ht, h, pData);
1084 	}
1085 }
1086 
1087 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
1088 {
1089 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
1090 }
1091 
1092 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
1093 {
1094 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
1095 }
1096 
1097 ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
1098 {
1099 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
1100 }
1101 
1102 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
1103 {
1104 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
1105 }
1106 
1107 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
1108 {
1109 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
1110 }
1111 
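/* Renames an existing bucket in place: it is unlinked from the collision chain of
 * its old key, given the new key and hash, and relinked into the new chain while
 * preserving the higher-to-lower bucket order of the chain.  Returns NULL if a
 * different bucket already uses the requested key. */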
1112 ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
1113 {
1114 	uint32_t nIndex;
1115 	uint32_t idx, i;
1116 	Bucket *p, *arData;
1117 
1118 	IS_CONSISTENT(ht);
1119 	HT_ASSERT_RC1(ht);
1120 	ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
1121 
1122 	p = zend_hash_find_bucket(ht, key, 0);
1123 	if (UNEXPECTED(p)) {
1124 		return (p == b) ? &p->val : NULL;
1125 	}
1126 
1127 	if (!ZSTR_IS_INTERNED(key)) {
1128 		zend_string_addref(key);
1129 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
1130 	}
1131 
1132 	arData = ht->arData;
1133 
1134 	/* del from hash */
1135 	idx = HT_IDX_TO_HASH(b - arData);
1136 	nIndex = b->h | ht->nTableMask;
1137 	i = HT_HASH_EX(arData, nIndex);
1138 	if (i == idx) {
1139 		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
1140 	} else {
1141 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1142 		while (Z_NEXT(p->val) != idx) {
1143 			i = Z_NEXT(p->val);
1144 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1145 		}
1146 		Z_NEXT(p->val) = Z_NEXT(b->val);
1147 	}
1148 	zend_string_release(b->key);
1149 
1150 	/* add to hash */
1151 	idx = b - arData;
1152 	b->key = key;
1153 	b->h = ZSTR_H(key);
1154 	nIndex = b->h | ht->nTableMask;
1155 	idx = HT_IDX_TO_HASH(idx);
1156 	i = HT_HASH_EX(arData, nIndex);
1157 	if (i == HT_INVALID_IDX || i < idx) {
1158 		Z_NEXT(b->val) = i;
1159 		HT_HASH_EX(arData, nIndex) = idx;
1160 	} else {
1161 		p = HT_HASH_TO_BUCKET_EX(arData, i);
1162 		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
1163 			i = Z_NEXT(p->val);
1164 			p = HT_HASH_TO_BUCKET_EX(arData, i);
1165 		}
1166 		Z_NEXT(b->val) = Z_NEXT(p->val);
1167 		Z_NEXT(p->val) = idx;
1168 	}
1169 	return &b->val;
1170 }
1171 
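/* Called when nNumUsed reaches nTableSize.  If the used slots contain enough
 * tombstones (more than about 1/32 of the live elements), the table is compacted in
 * place by zend_hash_rehash(); otherwise the allocation is doubled.  E.g. with 1000
 * live elements at least 32 deleted slots must have accumulated before a "resize"
 * becomes a compaction instead of a doubling. */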
1172 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
1173 {
1174 
1175 	IS_CONSISTENT(ht);
1176 	HT_ASSERT_RC1(ht);
1177 
1178 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
1179 		zend_hash_rehash(ht);
1180 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
1181 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
1182 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
1183 		Bucket *old_buckets = ht->arData;
1184 
1185 		ht->nTableSize = nSize;
1186 		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1187 		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
1188 		HT_SET_DATA_ADDR(ht, new_data);
1189 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
1190 		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1191 		zend_hash_rehash(ht);
1192 	} else {
1193 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
1194 	}
1195 }
1196 
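/* Rebuilds every collision chain from the bucket array.  Holes left by deletions
 * are squeezed out at the same time, and both the internal pointer and any
 * registered iterators are remapped from old to new bucket indexes so that
 * iteration order is preserved. */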
1197 ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
1198 {
1199 	Bucket *p;
1200 	uint32_t nIndex, i;
1201 
1202 	IS_CONSISTENT(ht);
1203 
1204 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
1205 		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1206 			ht->nNumUsed = 0;
1207 			HT_HASH_RESET(ht);
1208 		}
1209 		return;
1210 	}
1211 
1212 	HT_HASH_RESET(ht);
1213 	i = 0;
1214 	p = ht->arData;
1215 	if (HT_IS_WITHOUT_HOLES(ht)) {
1216 		do {
1217 			nIndex = p->h | ht->nTableMask;
1218 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1219 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1220 			p++;
1221 		} while (++i < ht->nNumUsed);
1222 	} else {
1223 		uint32_t old_num_used = ht->nNumUsed;
1224 		do {
1225 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
1226 				uint32_t j = i;
1227 				Bucket *q = p;
1228 
1229 				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
1230 					while (++i < ht->nNumUsed) {
1231 						p++;
1232 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1233 							ZVAL_COPY_VALUE(&q->val, &p->val);
1234 							q->h = p->h;
1235 							nIndex = q->h | ht->nTableMask;
1236 							q->key = p->key;
1237 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1238 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1239 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1240 								ht->nInternalPointer = j;
1241 							}
1242 							q++;
1243 							j++;
1244 						}
1245 					}
1246 				} else {
1247 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
1248 
1249 					while (++i < ht->nNumUsed) {
1250 						p++;
1251 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1252 							ZVAL_COPY_VALUE(&q->val, &p->val);
1253 							q->h = p->h;
1254 							nIndex = q->h | ht->nTableMask;
1255 							q->key = p->key;
1256 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1257 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1258 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1259 								ht->nInternalPointer = j;
1260 							}
1261 							if (UNEXPECTED(i >= iter_pos)) {
1262 								do {
1263 									zend_hash_iterators_update(ht, iter_pos, j);
1264 									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
1265 								} while (iter_pos < i);
1266 							}
1267 							q++;
1268 							j++;
1269 						}
1270 					}
1271 				}
1272 				ht->nNumUsed = j;
1273 				break;
1274 			}
1275 			nIndex = p->h | ht->nTableMask;
1276 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1277 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1278 			p++;
1279 		} while (++i < ht->nNumUsed);
1280 
1281 		/* Migrate iterators pointing one past the old end of the array to one past the new
1282 		 * end, so that newly inserted elements are picked up correctly. */
1283 		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1284 			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
1285 		}
1286 	}
1287 }
1288 
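/* Core deletion: unlink the bucket from its collision chain (prev is NULL when it
 * is the chain head), move the internal pointer and any iterators parked on it to
 * the next live bucket, and either shrink nNumUsed when the bucket sat at the end
 * of the array or leave an IS_UNDEF tombstone for a later rehash to compact.  The
 * value is copied aside and the slot marked IS_UNDEF before the destructor runs, so
 * destructors that re-enter the table never see the half-deleted entry. */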
1289 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
1290 {
1291 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1292 		if (prev) {
1293 			Z_NEXT(prev->val) = Z_NEXT(p->val);
1294 		} else {
1295 			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
1296 		}
1297 	}
1298 	idx = HT_HASH_TO_IDX(idx);
1299 	ht->nNumOfElements--;
1300 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1301 		uint32_t new_idx;
1302 
1303 		new_idx = idx;
1304 		while (1) {
1305 			new_idx++;
1306 			if (new_idx >= ht->nNumUsed) {
1307 				break;
1308 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1309 				break;
1310 			}
1311 		}
1312 		if (ht->nInternalPointer == idx) {
1313 			ht->nInternalPointer = new_idx;
1314 		}
1315 		zend_hash_iterators_update(ht, idx, new_idx);
1316 	}
1317 	if (ht->nNumUsed - 1 == idx) {
1318 		do {
1319 			ht->nNumUsed--;
1320 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
1321 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1322 	}
1323 	if (p->key) {
1324 		zend_string_release(p->key);
1325 	}
1326 	if (ht->pDestructor) {
1327 		zval tmp;
1328 		ZVAL_COPY_VALUE(&tmp, &p->val);
1329 		ZVAL_UNDEF(&p->val);
1330 		ht->pDestructor(&tmp);
1331 	} else {
1332 		ZVAL_UNDEF(&p->val);
1333 	}
1334 }
1335 
1336 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1337 {
1338 	Bucket *prev = NULL;
1339 
1340 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1341 		uint32_t nIndex = p->h | ht->nTableMask;
1342 		uint32_t i = HT_HASH(ht, nIndex);
1343 
1344 		if (i != idx) {
1345 			prev = HT_HASH_TO_BUCKET(ht, i);
1346 			while (Z_NEXT(prev->val) != idx) {
1347 				i = Z_NEXT(prev->val);
1348 				prev = HT_HASH_TO_BUCKET(ht, i);
1349 			}
1350 	 	}
1351 	}
1352 
1353 	_zend_hash_del_el_ex(ht, idx, p, prev);
1354 }
1355 
1356 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1357 {
1358 	IS_CONSISTENT(ht);
1359 	HT_ASSERT_RC1(ht);
1360 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1361 }
1362 
1363 ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1364 {
1365 	zend_ulong h;
1366 	uint32_t nIndex;
1367 	uint32_t idx;
1368 	Bucket *p;
1369 	Bucket *prev = NULL;
1370 
1371 	IS_CONSISTENT(ht);
1372 	HT_ASSERT_RC1(ht);
1373 
1374 	h = zend_string_hash_val(key);
1375 	nIndex = h | ht->nTableMask;
1376 
1377 	idx = HT_HASH(ht, nIndex);
1378 	while (idx != HT_INVALID_IDX) {
1379 		p = HT_HASH_TO_BUCKET(ht, idx);
1380 		if ((p->key == key) ||
1381 			(p->h == h &&
1382 		     p->key &&
1383 		     zend_string_equal_content(p->key, key))) {
1384 			_zend_hash_del_el_ex(ht, idx, p, prev);
1385 			return SUCCESS;
1386 		}
1387 		prev = p;
1388 		idx = Z_NEXT(p->val);
1389 	}
1390 	return FAILURE;
1391 }
1392 
1393 ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1394 {
1395 	zend_ulong h;
1396 	uint32_t nIndex;
1397 	uint32_t idx;
1398 	Bucket *p;
1399 	Bucket *prev = NULL;
1400 
1401 	IS_CONSISTENT(ht);
1402 	HT_ASSERT_RC1(ht);
1403 
1404 	h = zend_string_hash_val(key);
1405 	nIndex = h | ht->nTableMask;
1406 
1407 	idx = HT_HASH(ht, nIndex);
1408 	while (idx != HT_INVALID_IDX) {
1409 		p = HT_HASH_TO_BUCKET(ht, idx);
1410 		if ((p->key == key) ||
1411 			(p->h == h &&
1412 		     p->key &&
1413 		     zend_string_equal_content(p->key, key))) {
1414 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1415 				zval *data = Z_INDIRECT(p->val);
1416 
1417 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1418 					return FAILURE;
1419 				} else {
1420 					if (ht->pDestructor) {
1421 						zval tmp;
1422 						ZVAL_COPY_VALUE(&tmp, data);
1423 						ZVAL_UNDEF(data);
1424 						ht->pDestructor(&tmp);
1425 					} else {
1426 						ZVAL_UNDEF(data);
1427 					}
1428 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1429 				}
1430 			} else {
1431 				_zend_hash_del_el_ex(ht, idx, p, prev);
1432 			}
1433 			return SUCCESS;
1434 		}
1435 		prev = p;
1436 		idx = Z_NEXT(p->val);
1437 	}
1438 	return FAILURE;
1439 }
1440 
1441 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1442 {
1443 	zend_ulong h;
1444 	uint32_t nIndex;
1445 	uint32_t idx;
1446 	Bucket *p;
1447 	Bucket *prev = NULL;
1448 
1449 	IS_CONSISTENT(ht);
1450 	HT_ASSERT_RC1(ht);
1451 
1452 	h = zend_inline_hash_func(str, len);
1453 	nIndex = h | ht->nTableMask;
1454 
1455 	idx = HT_HASH(ht, nIndex);
1456 	while (idx != HT_INVALID_IDX) {
1457 		p = HT_HASH_TO_BUCKET(ht, idx);
1458 		if ((p->h == h)
1459 			 && p->key
1460 			 && (ZSTR_LEN(p->key) == len)
1461 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1462 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1463 				zval *data = Z_INDIRECT(p->val);
1464 
1465 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1466 					return FAILURE;
1467 				} else {
1468 					if (ht->pDestructor) {
1469 						ht->pDestructor(data);
1470 					}
1471 					ZVAL_UNDEF(data);
1472 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1473 				}
1474 			} else {
1475 				_zend_hash_del_el_ex(ht, idx, p, prev);
1476 			}
1477 			return SUCCESS;
1478 		}
1479 		prev = p;
1480 		idx = Z_NEXT(p->val);
1481 	}
1482 	return FAILURE;
1483 }
1484 
1485 ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1486 {
1487 	zend_ulong h;
1488 	uint32_t nIndex;
1489 	uint32_t idx;
1490 	Bucket *p;
1491 	Bucket *prev = NULL;
1492 
1493 	IS_CONSISTENT(ht);
1494 	HT_ASSERT_RC1(ht);
1495 
1496 	h = zend_inline_hash_func(str, len);
1497 	nIndex = h | ht->nTableMask;
1498 
1499 	idx = HT_HASH(ht, nIndex);
1500 	while (idx != HT_INVALID_IDX) {
1501 		p = HT_HASH_TO_BUCKET(ht, idx);
1502 		if ((p->h == h)
1503 			 && p->key
1504 			 && (ZSTR_LEN(p->key) == len)
1505 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1506 			_zend_hash_del_el_ex(ht, idx, p, prev);
1507 			return SUCCESS;
1508 		}
1509 		prev = p;
1510 		idx = Z_NEXT(p->val);
1511 	}
1512 	return FAILURE;
1513 }
1514 
1515 ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1516 {
1517 	uint32_t nIndex;
1518 	uint32_t idx;
1519 	Bucket *p;
1520 	Bucket *prev = NULL;
1521 
1522 	IS_CONSISTENT(ht);
1523 	HT_ASSERT_RC1(ht);
1524 
1525 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
1526 		if (h < ht->nNumUsed) {
1527 			p = ht->arData + h;
1528 			if (Z_TYPE(p->val) != IS_UNDEF) {
1529 				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1530 				return SUCCESS;
1531 			}
1532 		}
1533 		return FAILURE;
1534 	}
1535 	nIndex = h | ht->nTableMask;
1536 
1537 	idx = HT_HASH(ht, nIndex);
1538 	while (idx != HT_INVALID_IDX) {
1539 		p = HT_HASH_TO_BUCKET(ht, idx);
1540 		if ((p->h == h) && (p->key == NULL)) {
1541 			_zend_hash_del_el_ex(ht, idx, p, prev);
1542 			return SUCCESS;
1543 		}
1544 		prev = p;
1545 		idx = Z_NEXT(p->val);
1546 	}
1547 	return FAILURE;
1548 }
1549 
1550 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1551 {
1552 	Bucket *p, *end;
1553 
1554 	IS_CONSISTENT(ht);
1555 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1556 
1557 	if (ht->nNumUsed) {
1558 		p = ht->arData;
1559 		end = p + ht->nNumUsed;
1560 		if (ht->pDestructor) {
1561 			SET_INCONSISTENT(HT_IS_DESTROYING);
1562 
1563 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1564 				if (HT_IS_WITHOUT_HOLES(ht)) {
1565 					do {
1566 						ht->pDestructor(&p->val);
1567 					} while (++p != end);
1568 				} else {
1569 					do {
1570 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1571 							ht->pDestructor(&p->val);
1572 						}
1573 					} while (++p != end);
1574 				}
1575 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1576 				do {
1577 					ht->pDestructor(&p->val);
1578 					if (EXPECTED(p->key)) {
1579 						zend_string_release(p->key);
1580 					}
1581 				} while (++p != end);
1582 			} else {
1583 				do {
1584 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1585 						ht->pDestructor(&p->val);
1586 						if (EXPECTED(p->key)) {
1587 							zend_string_release(p->key);
1588 						}
1589 					}
1590 				} while (++p != end);
1591 			}
1592 
1593 			SET_INCONSISTENT(HT_DESTROYED);
1594 		} else {
1595 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1596 				do {
1597 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1598 						if (EXPECTED(p->key)) {
1599 							zend_string_release(p->key);
1600 						}
1601 					}
1602 				} while (++p != end);
1603 			}
1604 		}
1605 		zend_hash_iterators_remove(ht);
1606 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1607 		return;
1608 	}
1609 	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1610 }
1611 
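/* Specialized destructor for ordinary PHP arrays: assumes ZVAL_PTR_DTOR as the
 * per-element destructor and inlines it, falls back to the generic
 * zend_hash_destroy() when that assumption does not hold, and frees the HashTable
 * structure itself as well. */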
1612 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1613 {
1614 	Bucket *p, *end;
1615 
1616 	IS_CONSISTENT(ht);
1617 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1618 
1619 	/* break possible cycles */
1620 	GC_REMOVE_FROM_BUFFER(ht);
1621 	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;
1622 
1623 	if (ht->nNumUsed) {
1624 		/* In some rare cases destructors of regular arrays may be changed */
1625 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1626 			zend_hash_destroy(ht);
1627 			goto free_ht;
1628 		}
1629 
1630 		p = ht->arData;
1631 		end = p + ht->nNumUsed;
1632 		SET_INCONSISTENT(HT_IS_DESTROYING);
1633 
1634 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1635 			do {
1636 				i_zval_ptr_dtor(&p->val);
1637 			} while (++p != end);
1638 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1639 			do {
1640 				i_zval_ptr_dtor(&p->val);
1641 				if (EXPECTED(p->key)) {
1642 					zend_string_release_ex(p->key, 0);
1643 				}
1644 			} while (++p != end);
1645 		} else {
1646 			do {
1647 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1648 					i_zval_ptr_dtor(&p->val);
1649 					if (EXPECTED(p->key)) {
1650 						zend_string_release_ex(p->key, 0);
1651 					}
1652 				}
1653 			} while (++p != end);
1654 		}
1655 	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1656 		goto free_ht;
1657 	}
1658 	SET_INCONSISTENT(HT_DESTROYED);
1659 	efree(HT_GET_DATA_ADDR(ht));
1660 free_ht:
1661 	zend_hash_iterators_remove(ht);
1662 	FREE_HASHTABLE(ht);
1663 }
1664 
1665 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1666 {
1667 	Bucket *p, *end;
1668 
1669 	IS_CONSISTENT(ht);
1670 	HT_ASSERT_RC1(ht);
1671 
1672 	if (ht->nNumUsed) {
1673 		p = ht->arData;
1674 		end = p + ht->nNumUsed;
1675 		if (ht->pDestructor) {
1676 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1677 				if (HT_IS_WITHOUT_HOLES(ht)) {
1678 					do {
1679 						ht->pDestructor(&p->val);
1680 					} while (++p != end);
1681 				} else {
1682 					do {
1683 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1684 							ht->pDestructor(&p->val);
1685 						}
1686 					} while (++p != end);
1687 				}
1688 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1689 				do {
1690 					ht->pDestructor(&p->val);
1691 					if (EXPECTED(p->key)) {
1692 						zend_string_release(p->key);
1693 					}
1694 				} while (++p != end);
1695 			} else {
1696 				do {
1697 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1698 						ht->pDestructor(&p->val);
1699 						if (EXPECTED(p->key)) {
1700 							zend_string_release(p->key);
1701 						}
1702 					}
1703 				} while (++p != end);
1704 			}
1705 		} else {
1706 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1707 				if (HT_IS_WITHOUT_HOLES(ht)) {
1708 					do {
1709 						if (EXPECTED(p->key)) {
1710 							zend_string_release(p->key);
1711 						}
1712 					} while (++p != end);
1713 				} else {
1714 					do {
1715 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1716 							if (EXPECTED(p->key)) {
1717 								zend_string_release(p->key);
1718 							}
1719 						}
1720 					} while (++p != end);
1721 				}
1722 			}
1723 		}
1724 		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1725 			HT_HASH_RESET(ht);
1726 		}
1727 	}
1728 	ht->nNumUsed = 0;
1729 	ht->nNumOfElements = 0;
1730 	ht->nNextFreeElement = ZEND_LONG_MIN;
1731 	ht->nInternalPointer = 0;
1732 }
1733 
1734 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1735 {
1736 	Bucket *p, *end;
1737 
1738 	IS_CONSISTENT(ht);
1739 	HT_ASSERT_RC1(ht);
1740 
1741 	if (ht->nNumUsed) {
1742 		p = ht->arData;
1743 		end = p + ht->nNumUsed;
1744 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1745 			do {
1746 				i_zval_ptr_dtor(&p->val);
1747 			} while (++p != end);
1748 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1749 			do {
1750 				i_zval_ptr_dtor(&p->val);
1751 				if (EXPECTED(p->key)) {
1752 					zend_string_release(p->key);
1753 				}
1754 			} while (++p != end);
1755 		} else {
1756 			do {
1757 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1758 					i_zval_ptr_dtor(&p->val);
1759 					if (EXPECTED(p->key)) {
1760 						zend_string_release(p->key);
1761 					}
1762 				}
1763 			} while (++p != end);
1764 		}
1765 		HT_HASH_RESET(ht);
1766 	}
1767 	ht->nNumUsed = 0;
1768 	ht->nNumOfElements = 0;
1769 	ht->nNextFreeElement = ZEND_LONG_MIN;
1770 	ht->nInternalPointer = 0;
1771 }
1772 
1773 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1774 {
1775 	uint32_t idx;
1776 	Bucket *p;
1777 
1778 	IS_CONSISTENT(ht);
1779 	HT_ASSERT_RC1(ht);
1780 
1781 	p = ht->arData;
1782 	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1783 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1784 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1785 	}
1786 	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1787 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1788 	}
1789 
1790 	SET_INCONSISTENT(HT_DESTROYED);
1791 }
1792 
1793 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1794 {
1795 	uint32_t idx;
1796 	Bucket *p;
1797 
1798 	IS_CONSISTENT(ht);
1799 	HT_ASSERT_RC1(ht);
1800 
1801 	idx = ht->nNumUsed;
1802 	p = ht->arData + ht->nNumUsed;
1803 	while (idx > 0) {
1804 		idx--;
1805 		p--;
1806 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1807 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1808 	}
1809 
1810 	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
1811 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1812 	}
1813 
1814 	SET_INCONSISTENT(HT_DESTROYED);
1815 }
1816 
1817 /* This is used to iterate over the elements of a hashtable and selectively
1818  * delete certain entries. apply_func() receives the data and decides whether
1819  * the entry should be deleted or the iteration should be stopped. The
1820  * following three return codes are possible:
1821  * ZEND_HASH_APPLY_KEEP   - continue
1822  * ZEND_HASH_APPLY_STOP   - stop iteration
1823  * ZEND_HASH_APPLY_REMOVE - delete the element; may be OR-ed with either of the above
1824  */
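/* Usage sketch (illustrative only, not part of this file; the callback name and
 * the table contents are hypothetical): an apply_func_t callback passed to
 * zend_hash_apply() that removes empty-string values and stops at the first long.
 *
 *     static int prune_cb(zval *val)
 *     {
 *         if (Z_TYPE_P(val) == IS_STRING && Z_STRLEN_P(val) == 0) {
 *             return ZEND_HASH_APPLY_REMOVE;
 *         }
 *         if (Z_TYPE_P(val) == IS_LONG) {
 *             return ZEND_HASH_APPLY_STOP;
 *         }
 *         return ZEND_HASH_APPLY_KEEP;
 *     }
 *
 *     zend_hash_apply(ht, prune_cb);
 */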
1825 
1826 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1827 {
1828 	uint32_t idx;
1829 	Bucket *p;
1830 	int result;
1831 
1832 	IS_CONSISTENT(ht);
1833 
1834 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1835 		p = ht->arData + idx;
1836 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1837 		result = apply_func(&p->val);
1838 
1839 		if (result & ZEND_HASH_APPLY_REMOVE) {
1840 			HT_ASSERT_RC1(ht);
1841 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1842 		}
1843 		if (result & ZEND_HASH_APPLY_STOP) {
1844 			break;
1845 		}
1846 	}
1847 }
1848 
1849 
1850 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1851 {
1852     uint32_t idx;
1853 	Bucket *p;
1854 	int result;
1855 
1856 	IS_CONSISTENT(ht);
1857 
1858 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1859 		p = ht->arData + idx;
1860 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1861 		result = apply_func(&p->val, argument);
1862 
1863 		if (result & ZEND_HASH_APPLY_REMOVE) {
1864 			HT_ASSERT_RC1(ht);
1865 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1866 		}
1867 		if (result & ZEND_HASH_APPLY_STOP) {
1868 			break;
1869 		}
1870 	}
1871 }
1872 
1873 
1874 ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1875 {
1876 	uint32_t idx;
1877 	Bucket *p;
1878 	va_list args;
1879 	zend_hash_key hash_key;
1880 	int result;
1881 
1882 	IS_CONSISTENT(ht);
1883 
1884 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1885 		p = ht->arData + idx;
1886 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1887 		va_start(args, num_args);
1888 		hash_key.h = p->h;
1889 		hash_key.key = p->key;
1890 
1891 		result = apply_func(&p->val, num_args, args, &hash_key);
1892 
1893 		if (result & ZEND_HASH_APPLY_REMOVE) {
1894 			HT_ASSERT_RC1(ht);
1895 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1896 		}
1897 		if (result & ZEND_HASH_APPLY_STOP) {
1898 			va_end(args);
1899 			break;
1900 		}
1901 		va_end(args);
1902 	}
1903 }
1904 
1905 
1906 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1907 {
1908 	uint32_t idx;
1909 	Bucket *p;
1910 	int result;
1911 
1912 	IS_CONSISTENT(ht);
1913 
1914 	idx = ht->nNumUsed;
1915 	while (idx > 0) {
1916 		idx--;
1917 		p = ht->arData + idx;
1918 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1919 
1920 		result = apply_func(&p->val);
1921 
1922 		if (result & ZEND_HASH_APPLY_REMOVE) {
1923 			HT_ASSERT_RC1(ht);
1924 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1925 		}
1926 		if (result & ZEND_HASH_APPLY_STOP) {
1927 			break;
1928 		}
1929 	}
1930 }
1931 
1932 
1933 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1934 {
1935     uint32_t idx;
1936 	Bucket *p;
1937 	zval *new_entry, *data;
1938 
1939 	IS_CONSISTENT(source);
1940 	IS_CONSISTENT(target);
1941 	HT_ASSERT_RC1(target);
1942 
1943 	for (idx = 0; idx < source->nNumUsed; idx++) {
1944 		p = source->arData + idx;
1945 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1946 
1947 		/* INDIRECT element may point to UNDEF-ined slots */
1948 		data = &p->val;
1949 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1950 			data = Z_INDIRECT_P(data);
1951 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1952 				continue;
1953 			}
1954 		}
1955 		if (p->key) {
1956 			new_entry = zend_hash_update(target, p->key, data);
1957 		} else {
1958 			new_entry = zend_hash_index_update(target, p->h, data);
1959 		}
1960 		if (pCopyConstructor) {
1961 			pCopyConstructor(new_entry);
1962 		}
1963 	}
1964 }
1965 
1966 
1967 static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
1968 {
1969 	zval *data = &p->val;
1970 
1971 	if (with_holes) {
1972 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1973 			data = Z_INDIRECT_P(data);
1974 		}
1975 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1976 			return 0;
1977 		}
1978 	} else if (!packed) {
1979 		/* INDIRECT element may point to UNDEF-ined slots */
1980 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1981 			data = Z_INDIRECT_P(data);
1982 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1983 				return 0;
1984 			}
1985 		}
1986 	}
1987 
1988 	do {
1989 		if (Z_OPT_REFCOUNTED_P(data)) {
1990 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
1991 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
1992 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
1993 				data = Z_REFVAL_P(data);
1994 				if (!Z_OPT_REFCOUNTED_P(data)) {
1995 					break;
1996 				}
1997 			}
1998 			Z_ADDREF_P(data);
1999 		}
2000 	} while (0);
2001 	ZVAL_COPY_VALUE(&q->val, data);
2002 
2003 	q->h = p->h;
2004 	if (packed) {
2005 		q->key = NULL;
2006 	} else {
2007 		uint32_t nIndex;
2008 
2009 		q->key = p->key;
2010 		if (!static_keys && q->key) {
2011 			zend_string_addref(q->key);
2012 		}
2013 
2014 		nIndex = q->h | target->nTableMask;
2015 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
2016 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
2017 	}
2018 	return 1;
2019 }
2020 
2021 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
2022 {
2023 	Bucket *p = source->arData;
2024 	Bucket *q = target->arData;
2025 	Bucket *end = p + source->nNumUsed;
2026 
2027 	do {
2028 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
2029 			if (with_holes) {
2030 				ZVAL_UNDEF(&q->val);
2031 			}
2032 		}
2033 		p++; q++;
2034 	} while (p != end);
2035 }
2036 
2037 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
2038 {
2039 	uint32_t idx = 0;
2040 	Bucket *p = source->arData;
2041 	Bucket *q = target->arData;
2042 	Bucket *end = p + source->nNumUsed;
2043 
2044 	do {
2045 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
2046 			uint32_t target_idx = idx;
2047 
2048 			idx++; p++;
2049 			while (p != end) {
2050 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
2051 					if (source->nInternalPointer == idx) {
2052 						target->nInternalPointer = target_idx;
2053 					}
2054 					target_idx++; q++;
2055 				}
2056 				idx++; p++;
2057 			}
2058 			return target_idx;
2059 		}
2060 		idx++; p++; q++;
2061 	} while (p != end);
2062 	return idx;
2063 }
2064 
2065 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
2066 {
2067 	uint32_t idx;
2068 	HashTable *target;
2069 
2070 	IS_CONSISTENT(source);
2071 
2072 	ALLOC_HASHTABLE(target);
2073 	GC_SET_REFCOUNT(target, 1);
2074 	GC_TYPE_INFO(target) = GC_ARRAY;
2075 
2076 	target->pDestructor = ZVAL_PTR_DTOR;
2077 
2078 	if (source->nNumOfElements == 0) {
2079 		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
2080 		target->nTableMask = HT_MIN_MASK;
2081 		target->nNumUsed = 0;
2082 		target->nNumOfElements = 0;
2083 		target->nNextFreeElement = source->nNextFreeElement;
2084 		target->nInternalPointer = 0;
2085 		target->nTableSize = HT_MIN_SIZE;
2086 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
2087 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
2088 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2089 		target->nTableMask = source->nTableMask;
2090 		target->nNumUsed = source->nNumUsed;
2091 		target->nNumOfElements = source->nNumOfElements;
2092 		target->nNextFreeElement = source->nNextFreeElement;
2093 		target->nTableSize = source->nTableSize;
2094 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
2095 		target->nInternalPointer = source->nInternalPointer;
2096 		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
2097 	} else if (HT_FLAGS(source) & HASH_FLAG_PACKED) {
2098 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2099 		target->nTableMask = HT_MIN_MASK;
2100 		target->nNumUsed = source->nNumUsed;
2101 		target->nNumOfElements = source->nNumOfElements;
2102 		target->nNextFreeElement = source->nNextFreeElement;
2103 		target->nTableSize = source->nTableSize;
2104 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
2105 		target->nInternalPointer =
2106 			(source->nInternalPointer < source->nNumUsed) ?
2107 				source->nInternalPointer : 0;
2108 
2109 		HT_HASH_RESET_PACKED(target);
2110 
2111 		if (HT_IS_WITHOUT_HOLES(target)) {
2112 			zend_array_dup_packed_elements(source, target, 0);
2113 		} else {
2114 			zend_array_dup_packed_elements(source, target, 1);
2115 		}
2116 	} else {
2117 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
2118 		target->nTableMask = source->nTableMask;
2119 		target->nNextFreeElement = source->nNextFreeElement;
2120 		target->nInternalPointer =
2121 			(source->nInternalPointer < source->nNumUsed) ?
2122 				source->nInternalPointer : 0;
2123 
2124 		target->nTableSize = source->nTableSize;
2125 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
2126 		HT_HASH_RESET(target);
2127 
2128 		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
2129 			if (HT_IS_WITHOUT_HOLES(source)) {
2130 				idx = zend_array_dup_elements(source, target, 1, 0);
2131 			} else {
2132 				idx = zend_array_dup_elements(source, target, 1, 1);
2133 			}
2134 		} else {
2135 			if (HT_IS_WITHOUT_HOLES(source)) {
2136 				idx = zend_array_dup_elements(source, target, 0, 0);
2137 			} else {
2138 				idx = zend_array_dup_elements(source, target, 0, 1);
2139 			}
2140 		}
2141 		target->nNumUsed = idx;
2142 		target->nNumOfElements = idx;
2143 	}
2144 	return target;
2145 }
2146 
2147 
2148 ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite)
2149 {
2150     uint32_t idx;
2151 	Bucket *p;
2152 	zval *t, *s;
2153 
2154 	IS_CONSISTENT(source);
2155 	IS_CONSISTENT(target);
2156 	HT_ASSERT_RC1(target);
2157 
2158 	if (overwrite) {
2159 		for (idx = 0; idx < source->nNumUsed; idx++) {
2160 			p = source->arData + idx;
2161 			s = &p->val;
2162 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2163 				s = Z_INDIRECT_P(s);
2164 			}
2165 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2166 				continue;
2167 			}
2168 			if (p->key) {
2169 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
2170 				if (pCopyConstructor) {
2171 					pCopyConstructor(t);
2172 				}
2173 			} else {
2174 				t = zend_hash_index_update(target, p->h, s);
2175 				if (pCopyConstructor) {
2176 					pCopyConstructor(t);
2177 				}
2178 			}
2179 		}
2180 	} else {
2181 		for (idx = 0; idx < source->nNumUsed; idx++) {
2182 			p = source->arData + idx;
2183 			s = &p->val;
2184 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2185 				s = Z_INDIRECT_P(s);
2186 			}
2187 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2188 				continue;
2189 			}
2190 			if (p->key) {
2191 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
2192 				if (t && pCopyConstructor) {
2193 					pCopyConstructor(t);
2194 				}
2195 			} else {
2196 				t = zend_hash_index_add(target, p->h, s);
2197 				if (t && pCopyConstructor) {
2198 					pCopyConstructor(t);
2199 				}
2200 			}
2201 		}
2202 	}
2203 }
2204 
2205 
2206 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
2207 {
2208 	zend_hash_key hash_key;
2209 
2210 	hash_key.h = p->h;
2211 	hash_key.key = p->key;
2212 	return merge_checker_func(target, source_data, &hash_key, pParam);
2213 }
2214 
2215 
2216 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
2217 {
2218 	uint32_t idx;
2219 	Bucket *p;
2220 	zval *t;
2221 
2222 	IS_CONSISTENT(source);
2223 	IS_CONSISTENT(target);
2224 	HT_ASSERT_RC1(target);
2225 
2226 	for (idx = 0; idx < source->nNumUsed; idx++) {
2227 		p = source->arData + idx;
2228 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2229 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
2230 			t = zend_hash_update(target, p->key, &p->val);
2231 			if (pCopyConstructor) {
2232 				pCopyConstructor(t);
2233 			}
2234 		}
2235 	}
2236 }
2237 
2238 
2239 /* Returns the hash table data if found and NULL if not. */
2240 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2241 {
2242 	Bucket *p;
2243 
2244 	IS_CONSISTENT(ht);
2245 
2246 	p = zend_hash_find_bucket(ht, key, 0);
2247 	return p ? &p->val : NULL;
2248 }
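/* Usage sketch (hypothetical key and table): look up a string key and treat a
 * NULL result as "not found".
 *
 *     zend_string *key = zend_string_init("name", sizeof("name") - 1, 0);
 *     zval *val = zend_hash_find(ht, key);
 *     if (val == NULL) {
 *         // key "name" is absent from ht
 *     }
 *     zend_string_release(key);
 */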
2249 
2250 ZEND_API zval* ZEND_FASTCALL _zend_hash_find_known_hash(const HashTable *ht, zend_string *key)
2251 {
2252 	Bucket *p;
2253 
2254 	IS_CONSISTENT(ht);
2255 
2256 	p = zend_hash_find_bucket(ht, key, 1);
2257 	return p ? &p->val : NULL;
2258 }
2259 
2260 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2261 {
2262 	zend_ulong h;
2263 	Bucket *p;
2264 
2265 	IS_CONSISTENT(ht);
2266 
2267 	h = zend_inline_hash_func(str, len);
2268 	p = zend_hash_str_find_bucket(ht, str, len, h);
2269 	return p ? &p->val : NULL;
2270 }
2271 
2272 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2273 {
2274 	Bucket *p;
2275 
2276 	IS_CONSISTENT(ht);
2277 
2278 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2279 		if (h < ht->nNumUsed) {
2280 			p = ht->arData + h;
2281 			if (Z_TYPE(p->val) != IS_UNDEF) {
2282 				return &p->val;
2283 			}
2284 		}
2285 		return NULL;
2286 	}
2287 
2288 	p = zend_hash_index_find_bucket(ht, h);
2289 	return p ? &p->val : NULL;
2290 }
2291 
2292 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2293 {
2294 	Bucket *p;
2295 
2296 	IS_CONSISTENT(ht);
2297 
2298 	p = zend_hash_index_find_bucket(ht, h);
2299 	return p ? &p->val : NULL;
2300 }
2301 
2302 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2303 {
2304 	IS_CONSISTENT(ht);
2305 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2306 	*pos = _zend_hash_get_valid_pos(ht, 0);
2307 }
2308 
2309 
2310 /* This function could be optimized further by remembering
2311  * the position of the last occupied bucket.
2312  */
2313 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2314 {
2315 	uint32_t idx;
2316 
2317 	IS_CONSISTENT(ht);
2318 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2319 
2320 	idx = ht->nNumUsed;
2321 	while (idx > 0) {
2322 		idx--;
2323 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2324 			*pos = idx;
2325 			return;
2326 		}
2327 	}
2328 	*pos = ht->nNumUsed;
2329 }
2330 
2331 
2332 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2333 {
2334 	uint32_t idx;
2335 
2336 	IS_CONSISTENT(ht);
2337 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2338 
2339 	idx = _zend_hash_get_valid_pos(ht, *pos);
2340 	if (idx < ht->nNumUsed) {
2341 		while (1) {
2342 			idx++;
2343 			if (idx >= ht->nNumUsed) {
2344 				*pos = ht->nNumUsed;
2345 				return SUCCESS;
2346 			}
2347 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2348 				*pos = idx;
2349 				return SUCCESS;
2350 			}
2351 		}
2352 	} else {
2353 		return FAILURE;
2354 	}
2355 }
2356 
2357 ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2358 {
2359 	uint32_t idx = *pos;
2360 
2361 	IS_CONSISTENT(ht);
2362 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2363 
2364 	if (idx < ht->nNumUsed) {
2365 		while (idx > 0) {
2366 			idx--;
2367 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2368 				*pos = idx;
2369 				return SUCCESS;
2370 			}
2371 		}
2372 		*pos = ht->nNumUsed;
2373  		return SUCCESS;
2374 	} else {
2375  		return FAILURE;
2376 	}
2377 }
2378 
2379 
2380 /* This function should be made binary safe  */
2381 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2382 {
2383 	uint32_t idx;
2384 	Bucket *p;
2385 
2386 	IS_CONSISTENT(ht);
2387 	idx = _zend_hash_get_valid_pos(ht, *pos);
2388 	if (idx < ht->nNumUsed) {
2389 		p = ht->arData + idx;
2390 		if (p->key) {
2391 			*str_index = p->key;
2392 			return HASH_KEY_IS_STRING;
2393 		} else {
2394 			*num_index = p->h;
2395 			return HASH_KEY_IS_LONG;
2396 		}
2397 	}
2398 	return HASH_KEY_NON_EXISTENT;
2399 }
2400 
2401 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2402 {
2403 	uint32_t idx;
2404 	Bucket *p;
2405 
2406 	IS_CONSISTENT(ht);
2407 	idx = _zend_hash_get_valid_pos(ht, *pos);
2408 	if (idx >= ht->nNumUsed) {
2409 		ZVAL_NULL(key);
2410 	} else {
2411 		p = ht->arData + idx;
2412 		if (p->key) {
2413 			ZVAL_STR_COPY(key, p->key);
2414 		} else {
2415 			ZVAL_LONG(key, p->h);
2416 		}
2417 	}
2418 }
2419 
2420 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2421 {
2422 	uint32_t idx;
2423 	Bucket *p;
2424 
2425 	IS_CONSISTENT(ht);
2426 	idx = _zend_hash_get_valid_pos(ht, *pos);
2427 	if (idx < ht->nNumUsed) {
2428 		p = ht->arData + idx;
2429 		if (p->key) {
2430 			return HASH_KEY_IS_STRING;
2431 		} else {
2432 			return HASH_KEY_IS_LONG;
2433 		}
2434 	}
2435 	return HASH_KEY_NON_EXISTENT;
2436 }
2437 
2438 
2439 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2440 {
2441 	uint32_t idx;
2442 	Bucket *p;
2443 
2444 	IS_CONSISTENT(ht);
2445 	idx = _zend_hash_get_valid_pos(ht, *pos);
2446 	if (idx < ht->nNumUsed) {
2447 		p = ht->arData + idx;
2448 		return &p->val;
2449 	} else {
2450 		return NULL;
2451 	}
2452 }
2453 
2454 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2455 {
2456 	zval val;
2457 	zend_ulong h;
2458 	zend_string *key;
2459 
2460 	val = p->val;
2461 	h = p->h;
2462 	key = p->key;
2463 
2464 	p->val = q->val;
2465 	p->h = q->h;
2466 	p->key = q->key;
2467 
2468 	q->val = val;
2469 	q->h = h;
2470 	q->key = key;
2471 }
2472 
2473 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2474 {
2475 	zval val;
2476 
2477 	val = p->val;
2478 	p->val = q->val;
2479 	q->val = val;
2480 }
2481 
2482 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2483 {
2484 	zval val;
2485 	zend_ulong h;
2486 
2487 	val = p->val;
2488 	h = p->h;
2489 
2490 	p->val = q->val;
2491 	p->h = q->h;
2492 
2493 	q->val = val;
2494 	q->h = h;
2495 }
2496 
2497 ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, zend_bool renumber)
2498 {
2499 	Bucket *p;
2500 	uint32_t i, j;
2501 
2502 	IS_CONSISTENT(ht);
2503 	HT_ASSERT_RC1(ht);
2504 
2505 	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
2506 		/* Doesn't require sorting */
2507 		return;
2508 	}
2509 
2510 	if (HT_IS_WITHOUT_HOLES(ht)) {
2511 		/* Store original order of elements in extra space to allow stable sorting. */
2512 		for (i = 0; i < ht->nNumUsed; i++) {
2513 			Z_EXTRA(ht->arData[i].val) = i;
2514 		}
2515 	} else {
2516 		/* Remove holes and store original order. */
2517 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2518 			p = ht->arData + j;
2519 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2520 			if (i != j) {
2521 				ht->arData[i] = *p;
2522 			}
2523 			Z_EXTRA(ht->arData[i].val) = i;
2524 			i++;
2525 		}
2526 		ht->nNumUsed = i;
2527 	}
2528 
2529 	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
2530 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2531 				((HT_FLAGS(ht) & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2532 
2533 	ht->nInternalPointer = 0;
2534 
2535 	if (renumber) {
2536 		for (j = 0; j < i; j++) {
2537 			p = ht->arData + j;
2538 			p->h = j;
2539 			if (p->key) {
2540 				zend_string_release(p->key);
2541 				p->key = NULL;
2542 			}
2543 		}
2544 
2545 		ht->nNextFreeElement = i;
2546 	}
2547 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2548 		if (!renumber) {
2549 			zend_hash_packed_to_hash(ht);
2550 		}
2551 	} else {
2552 		if (renumber) {
2553 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2554 			Bucket *old_buckets = ht->arData;
2555 
2556 			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
2557 			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2558 			ht->nTableMask = HT_MIN_MASK;
2559 			HT_SET_DATA_ADDR(ht, new_data);
2560 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2561 			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
2562 			HT_HASH_RESET_PACKED(ht);
2563 		} else {
2564 			zend_hash_rehash(ht);
2565 		}
2566 	}
2567 }
2568 
2569 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
2570 	uint32_t idx1, idx2;
2571 
2572 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2573 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2574 	}
2575 
2576 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2577 		Bucket *p1 = ht1->arData + idx1, *p2;
2578 		zval *pData1, *pData2;
2579 		int result;
2580 
2581 		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2582 		if (ordered) {
2583 			while (1) {
2584 				ZEND_ASSERT(idx2 != ht2->nNumUsed);
2585 				p2 = ht2->arData + idx2;
2586 				if (Z_TYPE(p2->val) != IS_UNDEF) break;
2587 				idx2++;
2588 			}
2589 			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
2590 				if (p1->h != p2->h) {
2591 					return p1->h > p2->h ? 1 : -1;
2592 				}
2593 			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
2594 				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2595 					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2596 				}
2597 
2598 				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2599 				if (result != 0) {
2600 					return result;
2601 				}
2602 			} else {
2603 				/* Mixed key types: a string key is considered larger */
2604 				return p1->key != NULL ? 1 : -1;
2605 			}
2606 			pData2 = &p2->val;
2607 			idx2++;
2608 		} else {
2609 			if (p1->key == NULL) { /* numeric index */
2610 				pData2 = zend_hash_index_find(ht2, p1->h);
2611 				if (pData2 == NULL) {
2612 					return 1;
2613 				}
2614 			} else { /* string index */
2615 				pData2 = zend_hash_find(ht2, p1->key);
2616 				if (pData2 == NULL) {
2617 					return 1;
2618 				}
2619 			}
2620 		}
2621 
2622 		pData1 = &p1->val;
2623 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2624 			pData1 = Z_INDIRECT_P(pData1);
2625 		}
2626 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2627 			pData2 = Z_INDIRECT_P(pData2);
2628 		}
2629 
2630 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
2631 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
2632 				return -1;
2633 			}
2634 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2635 			return 1;
2636 		} else {
2637 			result = compar(pData1, pData2);
2638 			if (result != 0) {
2639 				return result;
2640 			}
2641 		}
2642 	}
2643 
2644 	return 0;
2645 }
2646 
2647 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
2648 {
2649 	int result;
2650 	IS_CONSISTENT(ht1);
2651 	IS_CONSISTENT(ht2);
2652 
2653 	if (ht1 == ht2) {
2654 		return 0;
2655 	}
2656 
2657 	/* It's enough to protect only one of the arrays.
2658 	 * The second one may be referenced from the first and this may cause
2659 	 * false recursion detection.
2660 	 */
2661 	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
2662 		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
2663 	}
2664 
2665 	GC_TRY_PROTECT_RECURSION(ht1);
2666 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2667 	GC_TRY_UNPROTECT_RECURSION(ht1);
2668 
2669 	return result;
2670 }
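/* Usage sketch (illustrative; zend_compare is assumed to be the generic zval
 * comparator from zend_operators.h): ordered == 1 additionally requires the
 * keys to match pairwise in insertion order, while ordered == 0 looks every
 * key of ht1 up in ht2.
 *
 *     int same_order = zend_hash_compare(ht1, ht2, (compare_func_t) zend_compare, 1) == 0;
 *     int same_pairs = zend_hash_compare(ht1, ht2, (compare_func_t) zend_compare, 0) == 0;
 */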
2671 
2672 
2673 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, bucket_compare_func_t compar, uint32_t flag)
2674 {
2675 	uint32_t idx;
2676 	Bucket *p, *res;
2677 
2678 	IS_CONSISTENT(ht);
2679 
2680 	if (ht->nNumOfElements == 0 ) {
2681 		return NULL;
2682 	}
2683 
2684 	idx = 0;
2685 	while (1) {
2686 		if (idx == ht->nNumUsed) {
2687 			return NULL;
2688 		}
2689 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2690 		idx++;
2691 	}
2692 	res = ht->arData + idx;
2693 	for (; idx < ht->nNumUsed; idx++) {
2694 		p = ht->arData + idx;
2695 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2696 
2697 		if (flag) {
2698 			if (compar(res, p) < 0) { /* max */
2699 				res = p;
2700 			}
2701 		} else {
2702 			if (compar(res, p) > 0) { /* min */
2703 				res = p;
2704 			}
2705 		}
2706 	}
2707 	return &res->val;
2708 }
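/* Usage sketch (my_bucket_cmp is a hypothetical bucket_compare_func_t):
 * flag == 1 returns the value of the maximum bucket, flag == 0 the minimum,
 * and NULL is returned for an empty table.
 *
 *     zval *max_val = zend_hash_minmax(ht, my_bucket_cmp, 1);
 *     zval *min_val = zend_hash_minmax(ht, my_bucket_cmp, 0);
 */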
2709 
2710 ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2711 {
2712 	register const char *tmp = key;
2713 
2714 	const char *end = key + length;
2715 
2716 	if (*tmp == '-') {
2717 		tmp++;
2718 	}
2719 
2720 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
2721 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
2722 	 || (SIZEOF_ZEND_LONG == 4 &&
2723 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2724 	     *tmp > '2')) { /* overflow */
2725 		return 0;
2726 	}
2727 	*idx = (*tmp - '0');
2728 	while (1) {
2729 		++tmp;
2730 		if (tmp == end) {
2731 			if (*key == '-') {
2732 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
2733 					return 0;
2734 				}
2735 				*idx = 0 - *idx;
2736 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
2737 				return 0;
2738 			}
2739 			return 1;
2740 		}
2741 		if (*tmp <= '9' && *tmp >= '0') {
2742 			*idx = (*idx * 10) + (*tmp - '0');
2743 		} else {
2744 			return 0;
2745 		}
2746 	}
2747 }
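/* Behaviour sketch (illustrative): canonical decimal strings become integer
 * keys, while leading zeros, non-digits and out-of-range values keep the
 * string key.
 *
 *     zend_ulong idx;
 *     _zend_handle_numeric_str_ex("123", 3, &idx);   // returns 1, idx == 123
 *     _zend_handle_numeric_str_ex("-7", 2, &idx);    // returns 1, idx == (zend_ulong) -7
 *     _zend_handle_numeric_str_ex("0123", 4, &idx);  // returns 0: leading zero
 *     _zend_handle_numeric_str_ex("12a", 3, &idx);   // returns 0: non-digit
 */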
2748 
2749 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
2750  * and converts it to a "proptable" (contains only string keys).
2751  * If the symtable didn't need duplicating, its refcount is incremented.
2752  */
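/* Conversion sketch (illustrative call site, not part of this file):
 *
 *     HashTable *sym = zend_new_array(0);
 *     zval tmp;
 *     ZVAL_LONG(&tmp, 1);
 *     zend_hash_index_add(sym, 42, &tmp);
 *     HashTable *props = zend_symtable_to_proptable(sym);
 *     // props is a fresh table keyed by the string "42"; had sym contained
 *     // only string keys, props would be sym itself with an extra reference.
 */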
2753 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
2754 {
2755 	zend_ulong num_key;
2756 	zend_string *str_key;
2757 	zval *zv;
2758 
2759 	if (UNEXPECTED(HT_IS_PACKED(ht))) {
2760 		goto convert;
2761 	}
2762 
2763 	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
2764 		if (!str_key) {
2765 			goto convert;
2766 		}
2767 	} ZEND_HASH_FOREACH_END();
2768 
2769 	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
2770 		GC_ADDREF(ht);
2771 	}
2772 
2773 	return ht;
2774 
2775 convert:
2776 	{
2777 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
2778 
2779 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2780 			if (!str_key) {
2781 				str_key = zend_long_to_str(num_key);
2782 				zend_string_delref(str_key);
2783 			}
2784 			do {
2785 				if (Z_OPT_REFCOUNTED_P(zv)) {
2786 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2787 						zv = Z_REFVAL_P(zv);
2788 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2789 							break;
2790 						}
2791 					}
2792 					Z_ADDREF_P(zv);
2793 				}
2794 			} while (0);
2795 			zend_hash_update(new_ht, str_key, zv);
2796 		} ZEND_HASH_FOREACH_END();
2797 
2798 		return new_ht;
2799 	}
2800 }
2801 
2802 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
2803  * a "symtable" (contains integer and non-numeric string keys).
2804  * If the proptable didn't need duplicating, its refcount is incremented.
2805  */
2806 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, zend_bool always_duplicate)
2807 {
2808 	zend_ulong num_key;
2809 	zend_string *str_key;
2810 	zval *zv;
2811 
2812 	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
2813 		/* The `str_key &&` here might seem redundant: property tables should
2814 		 * only have string keys. Unfortunately, this isn't true, at the very
2815 		 * least because of ArrayObject, which stores a symtable where the
2816 		 * property table should be.
2817 		 */
2818 		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2819 			goto convert;
2820 		}
2821 	} ZEND_HASH_FOREACH_END();
2822 
2823 	if (always_duplicate) {
2824 		return zend_array_dup(ht);
2825 	}
2826 
2827 	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
2828 		GC_ADDREF(ht);
2829 	}
2830 
2831 	return ht;
2832 
2833 convert:
2834 	{
2835 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
2836 
2837 		ZEND_HASH_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
2838 			do {
2839 				if (Z_OPT_REFCOUNTED_P(zv)) {
2840 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2841 						zv = Z_REFVAL_P(zv);
2842 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2843 							break;
2844 						}
2845 					}
2846 					Z_ADDREF_P(zv);
2847 				}
2848 			} while (0);
2849 			/* Again, thank ArrayObject for `!str_key ||`. */
2850 			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2851 				zend_hash_index_update(new_ht, num_key, zv);
2852 			} else {
2853 				zend_hash_update(new_ht, str_key, zv);
2854 			}
2855 		} ZEND_HASH_FOREACH_END();
2856 
2857 		return new_ht;
2858 	}
2859 }
2860