xref: /PHP-7.4/Zend/zend_hash.c (revision 2c508c4d)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #if defined(__aarch64__)
26 # include <arm_neon.h>
27 #endif
28 
29 #ifdef __SSE2__
30 # include <mmintrin.h>
31 # include <emmintrin.h>
32 #endif
33 
#if ZEND_DEBUG
/* Assert that `expr` holds for `ht`, unless the table explicitly opts out of
 * copy-on-write checking via HASH_FLAG_ALLOW_COW_VIOLATION. Debug builds only. */
# define HT_ASSERT(ht, expr) \
	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
#else
# define HT_ASSERT(ht, expr)
#endif

/* A writer must hold the only reference to the table (no shared COW copies). */
#define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)

/* Sentinel stored into iterators whose backing hash table was destroyed. */
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
44 
#if ZEND_DEBUG

/* Table consistency states, kept in the HASH_FLAG_CONSISTENCY bits of
 * HT_FLAGS (debug builds only); checked by _zend_is_inconsistent(). */
#define HT_OK					0x00
#define HT_IS_DESTROYING		0x01
#define HT_DESTROYED			0x02
#define HT_CLEANING				0x03
51 
/* Debug helper: if `ht` is not in the HT_OK state, print a diagnostic naming
 * the offending state (being destroyed / destroyed / being cleaned) together
 * with the caller's file/line, then abort via ZEND_ASSERT(0). */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
		return;
	}
	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
		case HT_IS_DESTROYING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
			break;
		case HT_DESTROYED:
			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
			break;
		case HT_CLEANING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
			break;
		default:
			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
			break;
	}
	ZEND_ASSERT(0);
}
/* IS_CONSISTENT(ht): verify the table state at the call site (debug builds).
 * SET_INCONSISTENT(n): record a new consistency state in HT_FLAGS; note it
 * implicitly operates on a variable named `ht` in the caller's scope. */
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
#define SET_INCONSISTENT(n) do { \
		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
	} while (0)
#else
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif
81 
/* Grow the table when every bucket slot (live or tombstoned) is occupied. */
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
		zend_hash_do_resize(ht);					\
	}

/* Defined later in this file; forward-declared for the insert paths above. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
/* Round `nSize` up to the next power of two, clamped below by HT_MIN_SIZE;
 * raises a fatal error when the requested size reaches HT_MAX_SIZE (would
 * overflow the allocation size computation). */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#if defined(ZEND_WIN32)
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#if defined(ZEND_WIN32)
	if (BitScanReverse(&index, nSize - 1)) {
		return 0x2 << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bit scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
	return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: smear the highest set bit of (nSize - 1) downward,
	   then add one to reach the next power of two. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
123 
/* Allocate storage for a packed (vector-like) table of ht->nTableSize buckets
 * using the minimal hash part (HT_MIN_MASK), then mark the table packed and
 * reset its two hash slots. */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
	void *data;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
		/* Constant-size fast path: lets the allocator pick the bin statically. */
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
	} else {
		data = emalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
	}
	HT_SET_DATA_ADDR(ht, data);
	/* Don't overwrite iterator count. */
	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET_PACKED(ht);
}
140 
/* Allocate storage for a non-packed (hashed) table of ht->nTableSize slots
 * and reset every hash slot to HT_INVALID_IDX (-1).  The common HT_MIN_SIZE
 * case is special-cased with a fully unrolled reset of its 16 hash slots,
 * using SSE2 or NEON stores where available. */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
	void *data;
	uint32_t nSize = ht->nTableSize;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
		HT_SET_DATA_ADDR(ht, data);
		/* Don't overwrite iterator count. */
		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
#ifdef __SSE2__
		/* Fill the 16 uint32 hash slots with 0xFF bytes (== -1). */
		do {
			__m128i xmm0 = _mm_setzero_si128();
			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  0), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  4), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data,  8), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
		} while (0);
#elif defined(__aarch64__)
		/* Same 16-slot fill via NEON: four 128-bit stores of all-ones. */
		do {
			int32x4_t t = vdupq_n_s32(-1);
			vst1q_s32((int32_t*)&HT_HASH_EX(data,  0), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data,  4), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data,  8), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
		} while (0);
#else
		HT_HASH_EX(data,  0) = -1;
		HT_HASH_EX(data,  1) = -1;
		HT_HASH_EX(data,  2) = -1;
		HT_HASH_EX(data,  3) = -1;
		HT_HASH_EX(data,  4) = -1;
		HT_HASH_EX(data,  5) = -1;
		HT_HASH_EX(data,  6) = -1;
		HT_HASH_EX(data,  7) = -1;
		HT_HASH_EX(data,  8) = -1;
		HT_HASH_EX(data,  9) = -1;
		HT_HASH_EX(data, 10) = -1;
		HT_HASH_EX(data, 11) = -1;
		HT_HASH_EX(data, 12) = -1;
		HT_HASH_EX(data, 13) = -1;
		HT_HASH_EX(data, 14) = -1;
		HT_HASH_EX(data, 15) = -1;
#endif
		return;
	} else {
		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
	}
	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
	HT_SET_DATA_ADDR(ht, data);
	/* Generic path: clears all flags except the hash-slot reset below. */
	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET(ht);
}
198 
zend_hash_real_init_ex(HashTable * ht,int packed)199 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
200 {
201 	HT_ASSERT_RC1(ht);
202 	ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
203 	if (packed) {
204 		zend_hash_real_init_packed_ex(ht);
205 	} else {
206 		zend_hash_real_init_mixed_ex(ht);
207 	}
208 }
209 
/* Shared read-only hash part for tables with no real storage yet: exactly
 * -HT_MIN_MASK (two) slots, both HT_INVALID_IDX; arData points just past it. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};

/* Process-wide immutable empty array, shared by reference wherever an empty
 * array value is needed; refcount 2 and GC_IMMUTABLE keep it from being
 * freed or modified in place. */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_UNINITIALIZED,
	.nTableMask = HT_MIN_MASK,
	.arData = (Bucket*)&uninitialized_bucket[2],
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = 0,
	.pDestructor = ZVAL_PTR_DTOR
};
226 
/* Initialize the HashTable header without allocating bucket storage; arData
 * points at the shared `uninitialized_bucket` until first insertion triggers
 * zend_hash_real_init_*(). nSize is rounded up to a power of two. */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = IS_ARRAY | (persistent ? (GC_PERSISTENT << GC_FLAGS_SHIFT) : (GC_COLLECTABLE << GC_FLAGS_SHIFT));
	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	ht->nNextFreeElement = 0;
	ht->pDestructor = pDestructor;
	ht->nTableSize = zend_hash_check_size(nSize);
}
241 
/* Public entry point: initialize `ht` lazily (no storage allocated yet). */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
246 
/* Allocate a new, empty, non-persistent array of minimal size. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
	return ht;
}
253 
/* Allocate a new, empty, non-persistent array sized for `nSize` elements. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
	return ht;
}
260 
zend_new_pair(zval * val1,zval * val2)261 ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
262 {
263 	Bucket *p;
264 	HashTable *ht = emalloc(sizeof(HashTable));
265 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
266 	ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
267 	zend_hash_real_init_packed_ex(ht);
268 
269 	p = ht->arData;
270 	ZVAL_COPY_VALUE(&p->val, val1);
271 	p->h = 0;
272 	p->key = NULL;
273 
274 	p++;
275 	ZVAL_COPY_VALUE(&p->val, val2);
276 	p->h = 1;
277 	p->key = NULL;
278 	return ht;
279 }
280 
/* Double the capacity of a packed table in place, preserving its contents;
 * fatal error when doubling would exceed HT_MAX_SIZE. */
static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
	HT_ASSERT_RC1(ht);
	if (ht->nTableSize >= HT_MAX_SIZE) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
	}
	ht->nTableSize += ht->nTableSize;
	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
}
290 
/* Public wrapper: allocate real storage (packed or hashed) for `ht`. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
298 
/* Public wrapper: allocate packed storage for `ht`. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
306 
/* Public wrapper: allocate hashed (non-packed) storage for `ht`. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);

	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
314 
/* Convert a packed table to the hashed layout: allocate storage with a real
 * hash part, move the buckets over, free the old storage, and rebuild the
 * hash chains via zend_hash_rehash(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *old_buckets = ht->arData;
	uint32_t nSize = ht->nTableSize;

	HT_ASSERT_RC1(ht);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	zend_hash_rehash(ht);
}
330 
/* Convert a hashed table to the packed layout: allocate storage with the
 * minimal hash part, move the buckets over, and free the old storage.
 * Callers must ensure the table's keys form a dense integer sequence. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *old_buckets = ht->arData;

	HT_ASSERT_RC1(ht);
	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, new_data);
	HT_HASH_RESET_PACKED(ht);
	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
345 
/* Ensure `ht` has capacity for at least `nSize` elements in the requested
 * layout.  Uninitialized tables get their first real allocation; packed
 * tables are grown in place with perealloc2; hashed tables are reallocated
 * and rehashed.  Never shrinks. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
{
	HT_ASSERT_RC1(ht);
	if (nSize == 0) return;
	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			/* Layout must already match the request; extend() never converts. */
			ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
			if (nSize > ht->nTableSize) {
				ht->nTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			}
		} else {
			ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
			if (nSize > ht->nTableSize) {
				/* Hash part size depends on table size, so a fresh allocation
				 * plus bucket copy plus rehash is required (no in-place grow). */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				ht->nTableSize = nSize;
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
379 
/* Roll the table back so that only the first `nNumUsed` buckets remain,
 * unlinking each discarded live bucket from its hash chain.  Relies on the
 * invariant that collision chains always point from higher to lower bucket
 * indices, so popping the chain head suffices.  Values are NOT destructed
 * here — the caller owns that cleanup. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	/* Walk backwards from the old high-water mark to the new one. */
	while (p != end) {
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
#if 0
		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
			abort();
		}
#endif
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
403 
zend_array_recalc_elements(HashTable * ht)404 static uint32_t zend_array_recalc_elements(HashTable *ht)
405 {
406        zval *val;
407        uint32_t num = ht->nNumOfElements;
408 
409 	   ZEND_HASH_FOREACH_VAL(ht, val) {
410 		   if (Z_TYPE_P(val) == IS_INDIRECT) {
411 			   if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
412 				   num--;
413 			   }
414 		   }
415        } ZEND_HASH_FOREACH_END();
416        return num;
417 }
418 /* }}} */
419 
zend_array_count(HashTable * ht)420 ZEND_API uint32_t zend_array_count(HashTable *ht)
421 {
422 	uint32_t num;
423 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
424 		num = zend_array_recalc_elements(ht);
425 		if (UNEXPECTED(ht->nNumOfElements == num)) {
426 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
427 		}
428 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
429 		num = zend_array_recalc_elements(ht);
430 	} else {
431 		num = zend_hash_num_elements(ht);
432 	}
433 	return num;
434 }
435 /* }}} */
436 
_zend_hash_get_valid_pos(const HashTable * ht,HashPosition pos)437 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
438 {
439 	while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
440 		pos++;
441 	}
442 	return pos;
443 }
444 
/* Internal pointer position, skipped forward past any deleted buckets. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}
449 
/* Public wrapper for _zend_hash_get_current_pos(). */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}
454 
/* Register a new external iterator over `ht` at position `pos` and return its
 * slot index in EG(ht_iterators).  Reuses the first free slot; when the table
 * of iterator slots is full it is grown by 8 (migrating off the fixed
 * EG(ht_iterators_slots) buffer on first growth). */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end  = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* Once the per-table count has saturated, it stays saturated. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	while (iter != end) {
		if (iter->ht == NULL) {
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* No free slot: grow the iterator table by 8 entries. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		/* Still on the statically embedded buffer; switch to heap storage. */
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	/* Zero the remaining 7 fresh slots so they read as free (ht == NULL). */
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
	idx = iter - EG(ht_iterators);
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
491 
/* Return the position of iterator `idx`, rebinding it to `ht` first if it is
 * currently attached to a different (or destroyed) table; a rebound iterator
 * restarts at the table's internal pointer. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht)) {
		/* Drop the count on the old table, unless it was destroyed or its
		 * iterator counter has saturated. */
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
510 
zend_hash_iterator_pos_ex(uint32_t idx,zval * array)511 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
512 {
513 	HashTable *ht = Z_ARRVAL_P(array);
514 	HashTableIterator *iter = EG(ht_iterators) + idx;
515 
516 	ZEND_ASSERT(idx != (uint32_t)-1);
517 	if (UNEXPECTED(iter->ht != ht)) {
518 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
519 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
520 			HT_DEC_ITERATORS_COUNT(iter->ht);
521 		}
522 		SEPARATE_ARRAY(array);
523 		ht = Z_ARRVAL_P(array);
524 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
525 			HT_INC_ITERATORS_COUNT(ht);
526 		}
527 		iter->ht = ht;
528 		iter->pos = _zend_hash_get_current_pos(ht);
529 	}
530 	return iter->pos;
531 }
532 
/* Release iterator slot `idx`: decrement the attached table's iterator count
 * (when the table is alive and its counter has not saturated), free the slot,
 * and shrink EG(ht_iterators_used) past any trailing free slots. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);

	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;

	/* If this was the highest used slot, trim the used watermark. */
	if (idx == EG(ht_iterators_used) - 1) {
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
553 
_zend_hash_iterators_remove(HashTable * ht)554 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
555 {
556 	HashTableIterator *iter = EG(ht_iterators);
557 	HashTableIterator *end  = iter + EG(ht_iterators_used);
558 
559 	while (iter != end) {
560 		if (iter->ht == ht) {
561 			iter->ht = HT_POISONED_PTR;
562 		}
563 		iter++;
564 	}
565 }
566 
/* Fast-path wrapper: only scan the iterator table when `ht` actually has
 * live iterators attached. */
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		_zend_hash_iterators_remove(ht);
	}
}
573 
zend_hash_iterators_lower_pos(HashTable * ht,HashPosition start)574 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
575 {
576 	HashTableIterator *iter = EG(ht_iterators);
577 	HashTableIterator *end  = iter + EG(ht_iterators_used);
578 	HashPosition res = ht->nNumUsed;
579 
580 	while (iter != end) {
581 		if (iter->ht == ht) {
582 			if (iter->pos >= start && iter->pos < res) {
583 				res = iter->pos;
584 			}
585 		}
586 		iter++;
587 	}
588 	return res;
589 }
590 
_zend_hash_iterators_update(HashTable * ht,HashPosition from,HashPosition to)591 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
592 {
593 	HashTableIterator *iter = EG(ht_iterators);
594 	HashTableIterator *end  = iter + EG(ht_iterators_used);
595 
596 	while (iter != end) {
597 		if (iter->ht == ht && iter->pos == from) {
598 			iter->pos = to;
599 		}
600 		iter++;
601 	}
602 }
603 
zend_hash_iterators_advance(HashTable * ht,HashPosition step)604 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
605 {
606 	HashTableIterator *iter = EG(ht_iterators);
607 	HashTableIterator *end  = iter + EG(ht_iterators_used);
608 
609 	while (iter != end) {
610 		if (iter->ht == ht) {
611 			iter->pos += step;
612 		}
613 		iter++;
614 	}
615 }
616 
/* Look up the bucket holding zend_string key `key`; NULL if absent.
 * With known_hash set, trusts ZSTR_H(key) (caller guarantees it is computed);
 * otherwise computes/caches it via zend_string_hash_val().  Interned strings
 * are matched by pointer identity before falling back to hash + content
 * comparison while walking the collision chain. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, zend_bool known_hash)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	if (known_hash) {
		h = ZSTR_H(key);
	} else {
		h = zend_string_hash_val(key);
	}
	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);

	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}

	/* Walk the collision chain linked through Z_NEXT of each bucket's value. */
	while (1) {
		if (p->h == ZSTR_H(key) &&
		    EXPECTED(p->key) &&
		    zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
657 
/* Look up the bucket for raw string key (str,len) with precomputed hash `h`;
 * NULL if absent.  Matches by hash, then length, then memcmp. */
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if ((p->h == h)
			 && p->key
			 && (ZSTR_LEN(p->key) == len)
			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
680 
/* Look up the bucket for integer key `h`; NULL if absent.  Integer-keyed
 * buckets are identified by key == NULL, so a string key with the same hash
 * never matches. */
static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->h == h && !p->key) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
700 
/* Core insert/update for zend_string keys.  `flag` combines HASH_ADD,
 * HASH_ADD_NEW (caller guarantees the key is absent), HASH_UPDATE, and
 * HASH_UPDATE_INDIRECT (follow IS_INDIRECT values).  Returns the stored
 * value slot, or NULL when a HASH_ADD finds the key already present.
 * The value is moved in with ZVAL_COPY_VALUE (no refcount change); on
 * overwrite the old value is destroyed via pDestructor. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* First insertion into a lazy table: allocate hashed storage and
			 * append directly — the key cannot already exist. */
			zend_hash_real_init_mixed(ht);
			if (!ZSTR_IS_INTERNED(key)) {
				zend_string_addref(key);
				HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
				zend_string_hash_val(key);
			}
			goto add_to_hash;
		} else {
			/* String key forces conversion from packed to hashed layout. */
			zend_hash_packed_to_hash(ht);
			if (!ZSTR_IS_INTERNED(key)) {
				zend_string_addref(key);
				HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
				zend_string_hash_val(key);
			}
		}
	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
		/* Look up existing entry (in debug builds also for HASH_ADD_NEW, to
		 * assert the caller's "key is new" promise below). */
		p = zend_hash_find_bucket(ht, key, 0);

		if (p) {
			zval *data;

			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
			if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only overwrite through an INDIRECT
				 * whose target is currently UNDEF. */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
		if (!ZSTR_IS_INTERNED(key)) {
			zend_string_addref(key);
			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
		}
	} else if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
		zend_string_hash_val(key);
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	/* Append a new bucket and link it at the head of its collision chain. */
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	ZVAL_COPY_VALUE(&p->val, pData);

	return &p->val;
}
788 
/* Core insert/update for raw (char*, len) keys with precomputed hash `h`.
 * Same flag semantics as _zend_hash_add_or_update_i().  On insertion a new
 * zend_string is created for the key, so the table always loses
 * HASH_FLAG_STATIC_KEYS.  Returns the stored value slot, or NULL when a
 * HASH_ADD finds the key already present. */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* First insertion: key cannot exist yet, append directly. */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* String key forces conversion from packed to hashed layout. */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);

		if (p) {
			zval *data;

			if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only overwrite through an INDIRECT
				 * whose target is currently UNDEF. */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	/* Append a new bucket with a freshly allocated key string, seeding its
	 * cached hash with the caller-supplied `h`, and link it into its chain. */
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	p->h = ZSTR_H(key) = h;
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	ZVAL_COPY_VALUE(&p->val, pData);
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);

	return &p->val;
}
857 
zend_hash_add_or_update(HashTable * ht,zend_string * key,zval * pData,uint32_t flag)858 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
859 {
860 	if (flag == HASH_ADD) {
861 		return zend_hash_add(ht, key, pData);
862 	} else if (flag == HASH_ADD_NEW) {
863 		return zend_hash_add_new(ht, key, pData);
864 	} else if (flag == HASH_UPDATE) {
865 		return zend_hash_update(ht, key, pData);
866 	} else {
867 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
868 		return zend_hash_update_ind(ht, key, pData);
869 	}
870 }
871 
/* Insert `pData` under `key`; fails (returns NULL) if the key exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}
876 
/* Insert or overwrite `pData` under `key`. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}
881 
/* Like zend_hash_update(), but writes through IS_INDIRECT values. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
886 
/* Insert `pData` under `key`, which the caller guarantees is not present
 * (skips the existence check in release builds). */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
891 
zend_hash_str_add_or_update(HashTable * ht,const char * str,size_t len,zval * pData,uint32_t flag)892 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
893 {
894 	if (flag == HASH_ADD) {
895 		return zend_hash_str_add(ht, str, len, pData);
896 	} else if (flag == HASH_ADD_NEW) {
897 		return zend_hash_str_add_new(ht, str, len, pData);
898 	} else if (flag == HASH_UPDATE) {
899 		return zend_hash_str_update(ht, str, len, pData);
900 	} else {
901 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
902 		return zend_hash_str_update_ind(ht, str, len, pData);
903 	}
904 }
905 
/* Insert or overwrite by C string key: hashes (str,len) and delegates. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
}
912 
/* C-string-key variant of zend_hash_update_ind(): updates through IS_INDIRECT slots. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
919 
/* Insert by C string key; returns NULL if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
}
926 
/* Insert by C string key, caller guarantees the key is not present
 * (duplicate check only performed in debug builds). */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
}
933 
/* Insert a NULL zval under integer key h; fails (NULL) if h already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_index_add(ht, h, &dummy);
}
941 
/* Insert a NULL zval under string key; fails (NULL) if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_add(ht, key, &dummy);
}
949 
/* Insert a NULL zval under C string key; fails (NULL) if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_str_add(ht, str, len, &dummy);
}
957 
/* Core implementation for all integer-key insert/update operations.
 * Handles three table states: packed (vector of consecutive indices),
 * uninitialized (lazy allocation), and real hash. May convert a packed
 * table to a hash table, or grow it, as a side effect.
 * Returns the slot holding the value, or NULL when HASH_ADD finds an
 * existing entry. */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
replace:
				/* Key exists: ADD fails, UPDATE destroys the old value in place. */
				if (flag & HASH_ADD) {
					return NULL;
				}
				if (ht->pDestructor) {
					ht->pDestructor(&p->val);
				}
				ZVAL_COPY_VALUE(&p->val, pData);
				return &p->val;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			p = ht->arData + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				/* Writing past the used region leaves a gap: mark the skipped
				 * slots UNDEF so iteration can recognize them as holes. */
				if (h > ht->nNumUsed) {
					Bucket *q = ht->arData + ht->nNumUsed;
					while (q != p) {
						ZVAL_UNDEF(&q->val);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			goto add;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* Index is near enough and the table is dense enough that
			 * staying packed (after doubling) is still worthwhile. */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		/* First insertion decides the representation: small indices
		 * start packed, large ones go straight to a real hash. */
		if (h < ht->nTableSize) {
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		/* HASH_ADD_NEW skips the duplicate lookup in release builds;
		 * debug builds still look up to assert the caller's promise. */
		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
	}

	/* Append a new bucket and link it at the head of its collision chain. */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	/* Keep nNextFreeElement ahead of h, saturating at ZEND_LONG_MAX. */
	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
add:
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL;
	ZVAL_COPY_VALUE(&p->val, pData);

	return &p->val;
}
1042 
zend_hash_index_add_or_update(HashTable * ht,zend_ulong h,zval * pData,uint32_t flag)1043 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
1044 {
1045 	if (flag == HASH_ADD) {
1046 		return zend_hash_index_add(ht, h, pData);
1047 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
1048 		return zend_hash_index_add_new(ht, h, pData);
1049 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
1050 		ZEND_ASSERT(h == ht->nNextFreeElement);
1051 		return zend_hash_next_index_insert(ht, pData);
1052 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
1053 		ZEND_ASSERT(h == ht->nNextFreeElement);
1054 		return zend_hash_next_index_insert_new(ht, pData);
1055 	} else {
1056 		ZEND_ASSERT(flag == HASH_UPDATE);
1057 		return zend_hash_index_update(ht, h, pData);
1058 	}
1059 }
1060 
/* Insert pData under integer key h; returns NULL if h already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
1065 
/* Insert under integer key h, caller guarantees h is not yet present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
1070 
/* Insert or overwrite the value under integer key h. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
1075 
/* Append pData under the next free integer key (like PHP's $arr[] = ...). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
1080 
/* Append under the next free integer key; caller guarantees no duplicate,
 * so the existence check is skipped in release builds. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
1085 
/* Rename an existing bucket in place: unlink it from its current collision
 * chain and relink it under the new string key, without moving the bucket
 * or changing insertion order. Returns NULL if the new key already exists
 * in a different bucket; not supported for packed tables. */
ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx, i;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));

	p = zend_hash_find_bucket(ht, key, 0);
	if (UNEXPECTED(p)) {
		/* New key already present: only a no-op rename (same bucket) succeeds. */
		return (p == b) ? &p->val : NULL;
	}

	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}

	arData = ht->arData;

	/* del from hash */
	idx = HT_IDX_TO_HASH(b - arData);
	nIndex = b->h | ht->nTableMask;
	i = HT_HASH_EX(arData, nIndex);
	if (i == idx) {
		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
	} else {
		/* Walk the chain to find the predecessor of b and splice it out. */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(p->val) = Z_NEXT(b->val);
	}
	zend_string_release(b->key);

	/* add to hash */
	idx = b - arData;
	b->key = key;
	b->h = ZSTR_H(key);
	nIndex = b->h | ht->nTableMask;
	idx = HT_IDX_TO_HASH(idx);
	i = HT_HASH_EX(arData, nIndex);
	if (i == HT_INVALID_IDX || i < idx) {
		Z_NEXT(b->val) = i;
		HT_HASH_EX(arData, nIndex) = idx;
	} else {
		/* Insert in descending idx order to preserve the chain invariant
		 * that newer buckets appear earlier. */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(b->val) = Z_NEXT(p->val);
		Z_NEXT(p->val) = idx;
	}
	return &b->val;
}
1145 
/* Make room for a new element in a full (real-hash) table: either compact
 * away deletion holes via rehash, or double the table size. Fatal error on
 * tables that would exceed HT_MAX_SIZE. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ht->nTableSize = nSize;
		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* Only the bucket array is copied; the hash part is rebuilt below. */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
1170 
/* Rebuild the hash part of the table from the bucket array, compacting
 * deletion holes (UNDEF buckets) in the process. Updates the internal
 * pointer and any active external iterators so they keep pointing at the
 * same logical elements. Always returns SUCCESS. */
ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
		}
		return SUCCESS;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Fast path: no holes, just relink every bucket. */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* First hole found at i: from here on, compact live buckets
				 * down to position j (q) while relinking them. */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* Same compaction, but also remap every external
					 * iterator position from old index i to new index j. */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
	return SUCCESS;
}
1263 
/* Delete bucket p (at hash-slot index idx, with known chain predecessor
 * prev, or NULL if p is the chain head). Unlinks the bucket, fixes the
 * internal pointer and iterators, trims trailing holes, releases the key,
 * and destroys the value. The caller must have located prev already. */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		if (prev) {
			Z_NEXT(prev->val) = Z_NEXT(p->val);
		} else {
			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
		}
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		/* Advance any cursor sitting on the deleted element to the next
		 * live element (or one past the end). */
		uint32_t new_idx;

		new_idx = idx;
		while (1) {
			new_idx++;
			if (new_idx >= ht->nNumUsed) {
				break;
			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
				break;
			}
		}
		if (ht->nInternalPointer == idx) {
			ht->nInternalPointer = new_idx;
		}
		zend_hash_iterators_update(ht, idx, new_idx);
	}
	if (ht->nNumUsed - 1 == idx) {
		/* Deleted the last used bucket: shrink nNumUsed past any trailing holes. */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
	}
	if (p->key) {
		zend_string_release(p->key);
	}
	if (ht->pDestructor) {
		/* Mark the slot UNDEF before running the destructor so re-entrant
		 * access during destruction sees a consistent table. */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
1310 
/* Delete bucket p at hash-slot index idx. Unlike _zend_hash_del_el_ex(),
 * this first walks the collision chain to locate p's predecessor. */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;

	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		uint32_t nIndex = p->h | ht->nTableMask;
		uint32_t i = HT_HASH(ht, nIndex);

		if (i != idx) {
			prev = HT_HASH_TO_BUCKET(ht, i);
			while (Z_NEXT(prev->val) != idx) {
				i = Z_NEXT(prev->val);
				prev = HT_HASH_TO_BUCKET(ht, i);
			}
	 	}
	}

	_zend_hash_del_el_ex(ht, idx, p, prev);
}
1330 
/* Delete a bucket given a direct pointer into ht->arData. */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
1337 
/* Delete the entry with the given string key.
 * Returns SUCCESS if found and removed, FAILURE otherwise. */
ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	/* Walk the collision chain, tracking prev so deletion can splice in O(1). */
	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* Pointer identity first (interned strings), then hash + content. */
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1367 
/* Delete by string key, with special handling for IS_INDIRECT slots
 * (e.g. symbol tables): an indirect slot is not removed, its target is
 * destroyed and set to UNDEF instead, and the table is flagged as having
 * empty indirect entries. */
ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				/* Already-undefined indirect target counts as "not found". */
				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* UNDEF the slot before destruction for re-entrancy safety. */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1415 
/* C-string-key variant of zend_hash_del_ind(): indirect slots have their
 * target destroyed and UNDEFed rather than being removed. */
ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h)
			 && p->key
			 && (ZSTR_LEN(p->key) == len)
			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						ht->pDestructor(data);
					}
					ZVAL_UNDEF(data);
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1459 
/* Delete the entry with the given C string key.
 * Returns SUCCESS if found and removed, FAILURE otherwise. */
ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h)
			 && p->key
			 && (ZSTR_LEN(p->key) == len)
			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1489 
/* Delete the entry with integer key h. Packed tables are addressed directly;
 * real hashes walk the collision chain. Returns SUCCESS/FAILURE. */
ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		/* Packed: the key is the array index; no chain to unlink. */
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* Integer-keyed buckets have key == NULL. */
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
1524 
/* Destroy all elements (values via pDestructor, keys via string release)
 * and free the table's data block. The HashTable struct itself is not
 * freed. The loops are specialized on static-keys-only / without-holes
 * flags to avoid per-element branches on the fast paths. */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			/* Debug-mode state marker: catches re-entrant access while
			 * destructors run. */
			SET_INCONSISTENT(HT_IS_DESTROYING);

			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}

			SET_INCONSISTENT(HT_DESTROYED);
		} else {
			/* No value destructor: only non-interned keys need releasing. */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		}
		zend_hash_iterators_remove(ht);
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* Lazy table was never allocated: nothing to free. */
		return;
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
1586 
/* Destroy a regular PHP array (refcounted zval values) and free the
 * HashTable struct itself. Optimized counterpart of zend_hash_destroy()
 * for the common ZVAL_PTR_DTOR case, using inlined zval destruction. */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = IS_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		p = ht->arData;
		end = p + ht->nNumUsed;
		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release_ex(p->key, 0);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release_ex(p->key, 0);
					}
				}
			} while (++p != end);
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		goto free_ht;
	}
	SET_INCONSISTENT(HT_DESTROYED);
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	zend_hash_iterators_remove(ht);
	FREE_HASHTABLE(ht);
}
1639 
/* Remove all elements but keep the table allocated and usable.
 * Values are destroyed via pDestructor, non-interned keys are released,
 * counters and the internal pointer are reset. Loop bodies are specialized
 * on static-keys-only / without-holes flags, as in zend_hash_destroy(). */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		} else {
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			}
		}
		/* Packed tables have no hash part to reset. */
		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = 0;
	ht->nInternalPointer = 0;
}
1708 
/* Clear a symbol table: like zend_hash_clean() but hard-wired to zval
 * destruction (i_zval_ptr_dtor) instead of calling pDestructor, and the
 * hash part is always reset (symbol tables are never packed). */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = 0;
	ht->nInternalPointer = 0;
}
1747 
/* Destroy the table element by element, in insertion order, via the full
 * deletion path (so iterators/internal pointer stay consistent between
 * deletions), then free the data block. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	p = ht->arData;
	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
	}
	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
1767 
/* Like zend_hash_graceful_destroy(), but deletes elements in reverse
 * insertion order. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	p = ht->arData + ht->nNumUsed;
	while (idx > 0) {
		idx--;
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
	}

	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
1791 
1792 /* This is used to recurse elements and selectively delete certain entries
1793  * from a hashtable. apply_func() receives the data and decides if the entry
1794  * should be deleted or recursion should be stopped. The following three
1795  * return codes are possible:
1796  * ZEND_HASH_APPLY_KEEP   - continue
1797  * ZEND_HASH_APPLY_STOP   - stop iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
1799  */
1800 
zend_hash_apply(HashTable * ht,apply_func_t apply_func)1801 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1802 {
1803 	uint32_t idx;
1804 	Bucket *p;
1805 	int result;
1806 
1807 	IS_CONSISTENT(ht);
1808 
1809 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1810 		p = ht->arData + idx;
1811 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1812 		result = apply_func(&p->val);
1813 
1814 		if (result & ZEND_HASH_APPLY_REMOVE) {
1815 			HT_ASSERT_RC1(ht);
1816 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1817 		}
1818 		if (result & ZEND_HASH_APPLY_STOP) {
1819 			break;
1820 		}
1821 	}
1822 }
1823 
1824 
zend_hash_apply_with_argument(HashTable * ht,apply_func_arg_t apply_func,void * argument)1825 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1826 {
1827     uint32_t idx;
1828 	Bucket *p;
1829 	int result;
1830 
1831 	IS_CONSISTENT(ht);
1832 
1833 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1834 		p = ht->arData + idx;
1835 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1836 		result = apply_func(&p->val, argument);
1837 
1838 		if (result & ZEND_HASH_APPLY_REMOVE) {
1839 			HT_ASSERT_RC1(ht);
1840 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1841 		}
1842 		if (result & ZEND_HASH_APPLY_STOP) {
1843 			break;
1844 		}
1845 	}
1846 }
1847 
1848 
/* Like zend_hash_apply(), but forwards a variable argument list plus the
 * element's key to apply_func. The va_list is re-initialized for every
 * element because apply_func consumes it. */
ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	uint32_t idx;
	Bucket *p;
	va_list args;
	zend_hash_key hash_key;
	int result;

	IS_CONSISTENT(ht);

	for (idx = 0; idx < ht->nNumUsed; idx++) {
		p = ht->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		va_start(args, num_args);
		hash_key.h = p->h;
		hash_key.key = p->key;

		result = apply_func(&p->val, num_args, args, &hash_key);

		if (result & ZEND_HASH_APPLY_REMOVE) {
			HT_ASSERT_RC1(ht);
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
		if (result & ZEND_HASH_APPLY_STOP) {
			/* va_end must run on every exit path for this va_start. */
			va_end(args);
			break;
		}
		va_end(args);
	}
}
1879 
1880 
zend_hash_reverse_apply(HashTable * ht,apply_func_t apply_func)1881 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1882 {
1883 	uint32_t idx;
1884 	Bucket *p;
1885 	int result;
1886 
1887 	IS_CONSISTENT(ht);
1888 
1889 	idx = ht->nNumUsed;
1890 	while (idx > 0) {
1891 		idx--;
1892 		p = ht->arData + idx;
1893 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1894 
1895 		result = apply_func(&p->val);
1896 
1897 		if (result & ZEND_HASH_APPLY_REMOVE) {
1898 			HT_ASSERT_RC1(ht);
1899 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1900 		}
1901 		if (result & ZEND_HASH_APPLY_STOP) {
1902 			break;
1903 		}
1904 	}
1905 }
1906 
1907 
zend_hash_copy(HashTable * target,HashTable * source,copy_ctor_func_t pCopyConstructor)1908 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1909 {
1910     uint32_t idx;
1911 	Bucket *p;
1912 	zval *new_entry, *data;
1913 
1914 	IS_CONSISTENT(source);
1915 	IS_CONSISTENT(target);
1916 	HT_ASSERT_RC1(target);
1917 
1918 	for (idx = 0; idx < source->nNumUsed; idx++) {
1919 		p = source->arData + idx;
1920 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1921 
1922 		/* INDIRECT element may point to UNDEF-ined slots */
1923 		data = &p->val;
1924 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1925 			data = Z_INDIRECT_P(data);
1926 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1927 				continue;
1928 			}
1929 		}
1930 		if (p->key) {
1931 			new_entry = zend_hash_update(target, p->key, data);
1932 		} else {
1933 			new_entry = zend_hash_index_update(target, p->h, data);
1934 		}
1935 		if (pCopyConstructor) {
1936 			pCopyConstructor(new_entry);
1937 		}
1938 	}
1939 }
1940 
1941 
/* Copy one bucket from source to target during array duplication.
 * The packed/static_keys/with_holes parameters are compile-time constants
 * at each call site, so the optimizer strips the unused branches.
 * Returns 0 when the element is a hole (or an indirect pointing at UNDEF)
 * and nothing was copied, 1 otherwise. */
static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
{
	zval *data = &p->val;

	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			/* Unwrap a reference with refcount 1, unless it refers back to
			 * the source array itself (self-referencing case must keep the
			 * reference wrapper). */
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break;
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(&q->val, data);

	q->h = p->h;
	if (packed) {
		q->key = NULL;
	} else {
		uint32_t nIndex;

		q->key = p->key;
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}

		/* Link the copied bucket into the target's collision chain. */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
1995 
zend_array_dup_packed_elements(HashTable * source,HashTable * target,int with_holes)1996 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
1997 {
1998 	Bucket *p = source->arData;
1999 	Bucket *q = target->arData;
2000 	Bucket *end = p + source->nNumUsed;
2001 
2002 	do {
2003 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
2004 			if (with_holes) {
2005 				ZVAL_UNDEF(&q->val);
2006 			}
2007 		}
2008 		p++; q++;
2009 	} while (p != end);
2010 }
2011 
/* Duplicate all buckets of a non-packed source into target.
 * Fast path: while every slot is live, source and target indices advance
 * in lockstep.  On the first skipped slot we fall into a compacting loop
 * in which the target index (target_idx) lags behind the source index
 * (idx); the internal pointer is remapped to its compacted position as
 * we go.  Returns the number of buckets written into target. */
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			uint32_t target_idx = idx;

			/* hole encountered: continue with separate source/target indices */
			idx++; p++;
			while (p != end) {
				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
					if (source->nInternalPointer == idx) {
						target->nInternalPointer = target_idx;
					}
					target_idx++; q++;
				}
				idx++; p++;
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx;
}
2039 
/* Create a refcount-1 duplicate of source, choosing the cheapest copy
 * strategy for its layout:
 *  - empty source: fresh uninitialized table (shared bucket sentinel);
 *  - immutable source: flat memcpy of the used region (values need no
 *    refcounting by definition of IS_ARRAY_IMMUTABLE);
 *  - packed source: positional bucket copy, holes preserved;
 *  - hashed source: bucket-by-bucket copy that compacts holes and
 *    rebuilds the collision chains. */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = IS_ARRAY | (GC_COLLECTABLE << GC_FLAGS_SHIFT);

	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* Empty: no bucket storage is allocated at all. */
		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		target->nTableSize = HT_MIN_SIZE;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* Immutable source: raw copy of hash slots + buckets. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		target->nInternalPointer = source->nInternalPointer;
		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
	} else if (HT_FLAGS(source) & HASH_FLAG_PACKED) {
		/* Packed source: copy buckets positionally (holes kept). */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		/* An internal pointer past the used region is clamped to 0. */
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		/* Hashed source: compacting copy; counts are taken from the
		 * number of buckets actually written. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
2121 
2122 
/* Merge all live elements of source into target.  With overwrite set,
 * existing target entries with the same key are replaced; otherwise
 * only keys absent from target are added (the add may fail, hence the
 * NULL check before running the copy constructor).  INDIRECT source
 * slots are dereferenced and skipped when they point at UNDEF. */
ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite)
{
    uint32_t idx;
	Bucket *p;
	zval *t, *s;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (overwrite) {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				/* HASH_UPDATE_INDIRECT: write through INDIRECT slots in target */
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_update(target, p->h, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	} else {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
				/* t is NULL when the key already existed (add refused) */
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_add(target, p->h, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	}
}
2179 
2180 
zend_hash_replace_checker_wrapper(HashTable * target,zval * source_data,Bucket * p,void * pParam,merge_checker_func_t merge_checker_func)2181 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
2182 {
2183 	zend_hash_key hash_key;
2184 
2185 	hash_key.h = p->h;
2186 	hash_key.key = p->key;
2187 	return merge_checker_func(target, source_data, &hash_key, pParam);
2188 }
2189 
2190 
zend_hash_merge_ex(HashTable * target,HashTable * source,copy_ctor_func_t pCopyConstructor,merge_checker_func_t pMergeSource,void * pParam)2191 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
2192 {
2193 	uint32_t idx;
2194 	Bucket *p;
2195 	zval *t;
2196 
2197 	IS_CONSISTENT(source);
2198 	IS_CONSISTENT(target);
2199 	HT_ASSERT_RC1(target);
2200 
2201 	for (idx = 0; idx < source->nNumUsed; idx++) {
2202 		p = source->arData + idx;
2203 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2204 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
2205 			t = zend_hash_update(target, p->key, &p->val);
2206 			if (pCopyConstructor) {
2207 				pCopyConstructor(t);
2208 			}
2209 		}
2210 	}
2211 }
2212 
2213 
2214 /* Returns the hash table data if found and NULL if not. */
zend_hash_find(const HashTable * ht,zend_string * key)2215 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2216 {
2217 	Bucket *p;
2218 
2219 	IS_CONSISTENT(ht);
2220 
2221 	p = zend_hash_find_bucket(ht, key, 0);
2222 	return p ? &p->val : NULL;
2223 }
2224 
_zend_hash_find_known_hash(const HashTable * ht,zend_string * key)2225 ZEND_API zval* ZEND_FASTCALL _zend_hash_find_known_hash(const HashTable *ht, zend_string *key)
2226 {
2227 	Bucket *p;
2228 
2229 	IS_CONSISTENT(ht);
2230 
2231 	p = zend_hash_find_bucket(ht, key, 1);
2232 	return p ? &p->val : NULL;
2233 }
2234 
zend_hash_str_find(const HashTable * ht,const char * str,size_t len)2235 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2236 {
2237 	zend_ulong h;
2238 	Bucket *p;
2239 
2240 	IS_CONSISTENT(ht);
2241 
2242 	h = zend_inline_hash_func(str, len);
2243 	p = zend_hash_str_find_bucket(ht, str, len, h);
2244 	return p ? &p->val : NULL;
2245 }
2246 
zend_hash_index_find(const HashTable * ht,zend_ulong h)2247 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2248 {
2249 	Bucket *p;
2250 
2251 	IS_CONSISTENT(ht);
2252 
2253 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2254 		if (h < ht->nNumUsed) {
2255 			p = ht->arData + h;
2256 			if (Z_TYPE(p->val) != IS_UNDEF) {
2257 				return &p->val;
2258 			}
2259 		}
2260 		return NULL;
2261 	}
2262 
2263 	p = zend_hash_index_find_bucket(ht, h);
2264 	return p ? &p->val : NULL;
2265 }
2266 
_zend_hash_index_find(const HashTable * ht,zend_ulong h)2267 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2268 {
2269 	Bucket *p;
2270 
2271 	IS_CONSISTENT(ht);
2272 
2273 	p = zend_hash_index_find_bucket(ht, h);
2274 	return p ? &p->val : NULL;
2275 }
2276 
/* Set *pos to the position of the first live element (nNumUsed when the
 * table has none).  When pos aliases the table's own internal pointer,
 * the table must not be shared (COW assertion). */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
	*pos = _zend_hash_get_valid_pos(ht, 0);
}
2283 
2284 
2285 /* This function will be extremely optimized by remembering
2286  * the end of the list
2287  */
zend_hash_internal_pointer_end_ex(HashTable * ht,HashPosition * pos)2288 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2289 {
2290 	uint32_t idx;
2291 
2292 	IS_CONSISTENT(ht);
2293 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2294 
2295 	idx = ht->nNumUsed;
2296 	while (idx > 0) {
2297 		idx--;
2298 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2299 			*pos = idx;
2300 			return;
2301 		}
2302 	}
2303 	*pos = ht->nNumUsed;
2304 }
2305 
2306 
zend_hash_move_forward_ex(HashTable * ht,HashPosition * pos)2307 ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2308 {
2309 	uint32_t idx;
2310 
2311 	IS_CONSISTENT(ht);
2312 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2313 
2314 	idx = _zend_hash_get_valid_pos(ht, *pos);
2315 	if (idx < ht->nNumUsed) {
2316 		while (1) {
2317 			idx++;
2318 			if (idx >= ht->nNumUsed) {
2319 				*pos = ht->nNumUsed;
2320 				return SUCCESS;
2321 			}
2322 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2323 				*pos = idx;
2324 				return SUCCESS;
2325 			}
2326 		}
2327 	} else {
2328 		return FAILURE;
2329 	}
2330 }
2331 
zend_hash_move_backwards_ex(HashTable * ht,HashPosition * pos)2332 ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2333 {
2334 	uint32_t idx = *pos;
2335 
2336 	IS_CONSISTENT(ht);
2337 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2338 
2339 	if (idx < ht->nNumUsed) {
2340 		while (idx > 0) {
2341 			idx--;
2342 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2343 				*pos = idx;
2344 				return SUCCESS;
2345 			}
2346 		}
2347 		*pos = ht->nNumUsed;
2348  		return SUCCESS;
2349 	} else {
2350  		return FAILURE;
2351 	}
2352 }
2353 
2354 
2355 /* This function should be made binary safe  */
zend_hash_get_current_key_ex(const HashTable * ht,zend_string ** str_index,zend_ulong * num_index,HashPosition * pos)2356 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2357 {
2358 	uint32_t idx;
2359 	Bucket *p;
2360 
2361 	IS_CONSISTENT(ht);
2362 	idx = _zend_hash_get_valid_pos(ht, *pos);
2363 	if (idx < ht->nNumUsed) {
2364 		p = ht->arData + idx;
2365 		if (p->key) {
2366 			*str_index = p->key;
2367 			return HASH_KEY_IS_STRING;
2368 		} else {
2369 			*num_index = p->h;
2370 			return HASH_KEY_IS_LONG;
2371 		}
2372 	}
2373 	return HASH_KEY_NON_EXISTENT;
2374 }
2375 
zend_hash_get_current_key_zval_ex(const HashTable * ht,zval * key,HashPosition * pos)2376 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2377 {
2378 	uint32_t idx;
2379 	Bucket *p;
2380 
2381 	IS_CONSISTENT(ht);
2382 	idx = _zend_hash_get_valid_pos(ht, *pos);
2383 	if (idx >= ht->nNumUsed) {
2384 		ZVAL_NULL(key);
2385 	} else {
2386 		p = ht->arData + idx;
2387 		if (p->key) {
2388 			ZVAL_STR_COPY(key, p->key);
2389 		} else {
2390 			ZVAL_LONG(key, p->h);
2391 		}
2392 	}
2393 }
2394 
zend_hash_get_current_key_type_ex(HashTable * ht,HashPosition * pos)2395 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2396 {
2397 	uint32_t idx;
2398 	Bucket *p;
2399 
2400 	IS_CONSISTENT(ht);
2401 	idx = _zend_hash_get_valid_pos(ht, *pos);
2402 	if (idx < ht->nNumUsed) {
2403 		p = ht->arData + idx;
2404 		if (p->key) {
2405 			return HASH_KEY_IS_STRING;
2406 		} else {
2407 			return HASH_KEY_IS_LONG;
2408 		}
2409 	}
2410 	return HASH_KEY_NON_EXISTENT;
2411 }
2412 
2413 
zend_hash_get_current_data_ex(HashTable * ht,HashPosition * pos)2414 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2415 {
2416 	uint32_t idx;
2417 	Bucket *p;
2418 
2419 	IS_CONSISTENT(ht);
2420 	idx = _zend_hash_get_valid_pos(ht, *pos);
2421 	if (idx < ht->nNumUsed) {
2422 		p = ht->arData + idx;
2423 		return &p->val;
2424 	} else {
2425 		return NULL;
2426 	}
2427 }
2428 
zend_hash_bucket_swap(Bucket * p,Bucket * q)2429 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2430 {
2431 	zval val;
2432 	zend_ulong h;
2433 	zend_string *key;
2434 
2435 	ZVAL_COPY_VALUE(&val, &p->val);
2436 	h = p->h;
2437 	key = p->key;
2438 
2439 	ZVAL_COPY_VALUE(&p->val, &q->val);
2440 	p->h = q->h;
2441 	p->key = q->key;
2442 
2443 	ZVAL_COPY_VALUE(&q->val, &val);
2444 	q->h = h;
2445 	q->key = key;
2446 }
2447 
zend_hash_bucket_renum_swap(Bucket * p,Bucket * q)2448 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2449 {
2450 	zval val;
2451 
2452 	ZVAL_COPY_VALUE(&val, &p->val);
2453 	ZVAL_COPY_VALUE(&p->val, &q->val);
2454 	ZVAL_COPY_VALUE(&q->val, &val);
2455 }
2456 
zend_hash_bucket_packed_swap(Bucket * p,Bucket * q)2457 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2458 {
2459 	zval val;
2460 	zend_ulong h;
2461 
2462 	ZVAL_COPY_VALUE(&val, &p->val);
2463 	h = p->h;
2464 
2465 	ZVAL_COPY_VALUE(&p->val, &q->val);
2466 	p->h = q->h;
2467 
2468 	ZVAL_COPY_VALUE(&q->val, &val);
2469 	q->h = h;
2470 }
2471 
/* Sort the table in place with the given sort driver and comparator.
 * Stages: (1) compact holes; (2) sort the bucket array, choosing the
 * cheapest swap routine for the layout; (3) if renumber, rewrite all
 * keys as consecutive integers; (4) restore layout invariants (convert
 * packed<->hashed or rehash as required).  Always returns SUCCESS. */
ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
{
	Bucket *p;
	uint32_t i, j;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
		return SUCCESS;
	}

	/* Compact: shift live buckets down over any UNDEF holes so the
	 * sort operates on a dense prefix of length i. */
	if (HT_IS_WITHOUT_HOLES(ht)) {
		i = ht->nNumUsed;
	} else {
		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
			p = ht->arData + j;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			if (i != j) {
				ht->arData[i] = *p;
			}
			i++;
		}
	}

	/* renumber discards keys, so value-only swaps suffice; otherwise
	 * packed tables can skip swapping the (absent) key pointer. */
	sort((void *)ht->arData, i, sizeof(Bucket), compar,
			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
				((HT_FLAGS(ht) & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));

	ht->nNumUsed = i;
	ht->nInternalPointer = 0;

	if (renumber) {
		/* Rewrite keys 0..i-1, releasing any string keys. */
		for (j = 0; j < i; j++) {
			p = ht->arData + j;
			p->h = j;
			if (p->key) {
				zend_string_release(p->key);
				p->key = NULL;
			}
		}

		ht->nNextFreeElement = i;
	}
	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (!renumber) {
			/* packed order no longer matches keys: fall back to hashed */
			zend_hash_packed_to_hash(ht);
		}
	} else {
		if (renumber) {
			/* keys are now 0..i-1: convert storage to the packed layout */
			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
			Bucket *old_buckets = ht->arData;

			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
			ht->nTableMask = HT_MIN_MASK;
			HT_SET_DATA_ADDR(ht, new_data);
			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
			HT_HASH_RESET_PACKED(ht);
		} else {
			/* bucket order changed: rebuild the collision chains */
			zend_hash_rehash(ht);
		}
	}

	return SUCCESS;
}
2539 
/* Core of zend_hash_compare().  Returns <0, 0 or >0.
 * Tables of different size compare by element count.  With ordered set,
 * both tables are walked in parallel iteration order and keys must match
 * pairwise (numeric by value, string by length then bytes; a string key
 * sorts after a numeric one).  Without ordered, each ht1 key is looked
 * up in ht2 (missing key => ht1 is "greater").  Values are compared via
 * compar after resolving INDIRECT slots; an UNDEF value sorts before a
 * defined one. */
static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
	uint32_t idx1, idx2;

	if (ht1->nNumOfElements != ht2->nNumOfElements) {
		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
	}

	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
		Bucket *p1 = ht1->arData + idx1, *p2;
		zval *pData1, *pData2;
		int result;

		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
		if (ordered) {
			/* advance the ht2 cursor past holes; equal element counts
			 * guarantee a live partner exists */
			while (1) {
				ZEND_ASSERT(idx2 != ht2->nNumUsed);
				p2 = ht2->arData + idx2;
				if (Z_TYPE(p2->val) != IS_UNDEF) break;
				idx2++;
			}
			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
				if (p1->h != p2->h) {
					return p1->h > p2->h ? 1 : -1;
				}
			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
				}

				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
				if (result != 0) {
					return result;
				}
			} else {
				/* Mixed key types: A string key is considered as larger */
				return p1->key != NULL ? 1 : -1;
			}
			pData2 = &p2->val;
			idx2++;
		} else {
			if (p1->key == NULL) { /* numeric index */
				pData2 = zend_hash_index_find(ht2, p1->h);
				if (pData2 == NULL) {
					return 1;
				}
			} else { /* string index */
				pData2 = zend_hash_find(ht2, p1->key);
				if (pData2 == NULL) {
					return 1;
				}
			}
		}

		/* resolve INDIRECT slots before comparing values */
		pData1 = &p1->val;
		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
			pData1 = Z_INDIRECT_P(pData1);
		}
		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
			pData2 = Z_INDIRECT_P(pData2);
		}

		if (Z_TYPE_P(pData1) == IS_UNDEF) {
			if (Z_TYPE_P(pData2) != IS_UNDEF) {
				return -1;
			}
		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
			return 1;
		} else {
			result = compar(pData1, pData2);
			if (result != 0) {
				return result;
			}
		}
	}

	return 0;
}
2617 
/* Compare two hash tables (see zend_hash_compare_impl for the ordering
 * rules).  Guards against infinite recursion through self-referencing
 * arrays by marking ht1 while the comparison runs; immutable tables are
 * never marked (their GC info must not be written). */
ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
{
	int result;
	IS_CONSISTENT(ht1);
	IS_CONSISTENT(ht2);

	if (ht1 == ht2) {
		return 0;
	}

	/* It's enough to protect only one of the arrays.
	 * The second one may be referenced from the first and this may cause
	 * false recursion detection.
	 */
	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
	}

	if (!(GC_FLAGS(ht1) & GC_IMMUTABLE)) {
		GC_PROTECT_RECURSION(ht1);
	}
	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
	if (!(GC_FLAGS(ht1) & GC_IMMUTABLE)) {
		GC_UNPROTECT_RECURSION(ht1);
	}

	return result;
}
2646 
2647 
zend_hash_minmax(const HashTable * ht,compare_func_t compar,uint32_t flag)2648 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
2649 {
2650 	uint32_t idx;
2651 	Bucket *p, *res;
2652 
2653 	IS_CONSISTENT(ht);
2654 
2655 	if (ht->nNumOfElements == 0 ) {
2656 		return NULL;
2657 	}
2658 
2659 	idx = 0;
2660 	while (1) {
2661 		if (idx == ht->nNumUsed) {
2662 			return NULL;
2663 		}
2664 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2665 		idx++;
2666 	}
2667 	res = ht->arData + idx;
2668 	for (; idx < ht->nNumUsed; idx++) {
2669 		p = ht->arData + idx;
2670 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2671 
2672 		if (flag) {
2673 			if (compar(res, p) < 0) { /* max */
2674 				res = p;
2675 			}
2676 		} else {
2677 			if (compar(res, p) > 0) { /* min */
2678 				res = p;
2679 			}
2680 		}
2681 	}
2682 	return &res->val;
2683 }
2684 
/* Decide whether key[0..length) spells a canonical decimal integer that
 * fits in a zend_long; on success store its value in *idx and return 1,
 * otherwise return 0.  Rejected: leading zeros ("01"), strings longer
 * than a zend_long can hold, and out-of-range magnitudes.  Negative
 * values may reach ZEND_LONG_MIN (magnitude ZEND_LONG_MAX + 1).
 * NOTE(review): the first (possibly sign-prefixed) character is assumed
 * to be a digit — presumably pre-checked by the fast-path wrapper in
 * zend_hash.h; confirm against callers. */
ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
	register const char *tmp = key;

	const char *end = key + length;

	if (*tmp == '-') {
		tmp++;
	}

	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
	 || (SIZEOF_ZEND_LONG == 4 &&
	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
	     *tmp > '2')) { /* overflow */
		return 0;
	}
	/* accumulate the magnitude in the unsigned *idx */
	*idx = (*tmp - '0');
	while (1) {
		++tmp;
		if (tmp == end) {
			if (*key == '-') {
				/* magnitude up to ZEND_LONG_MAX + 1 is allowed here,
				 * i.e. the value may be exactly ZEND_LONG_MIN */
				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
					return 0;
				}
				*idx = 0 - *idx;
			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
				return 0;
			}
			return 1;
		}
		if (*tmp <= '9' && *tmp >= '0') {
			*idx = (*idx * 10) + (*tmp - '0');
		} else {
			return 0;
		}
	}
}
2723 
2724 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
2725  * and converts it to a "proptable" (contains only string keys).
2726  * If the symtable didn't need duplicating, its refcount is incremented.
2727  */
/* See the block comment above: symtable -> proptable conversion.
 * Fast path: if the table is not packed and already has only string
 * keys, reuse it (addref unless immutable).  Otherwise build a new
 * table in which every numeric key is stringified. */
ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	if (UNEXPECTED(HT_IS_PACKED(ht))) {
		goto convert;
	}

	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
		if (!str_key) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
			if (!str_key) {
				/* stringify the numeric key; drop our reference so the
				 * target table ends up as the sole owner */
				str_key = zend_long_to_str(num_key);
				zend_string_delref(str_key);
			}
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* flatten singly-held references to their value */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv);
				}
			} while (0);
			zend_hash_update(new_ht, str_key, zv);
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
2776 
2777 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
2778  * a "symtable" (contains integer and non-numeric string keys).
2779  * If the proptable didn't need duplicating, its refcount is incremented.
2780  */
/* See the block comment above: proptable -> symtable conversion.
 * If no string key spells a numeric index, the table can be reused
 * (duplicated when always_duplicate is set, otherwise addref'd unless
 * immutable).  Otherwise build a new table with numeric-looking string
 * keys converted to integer keys. */
ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, zend_bool always_duplicate)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
		/* The `str_key &&` here might seem redundant: property tables should
		 * only have string keys. Unfortunately, this isn't true, at the very
		 * least because of ArrayObject, which stores a symtable where the
		 * property table should be.
		 */
		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	if (always_duplicate) {
		return zend_array_dup(ht);
	}

	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* flatten singly-held references to their value */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv);
				}
			} while (0);
			/* Again, thank ArrayObject for `!str_key ||`. */
			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
				zend_hash_index_update(new_ht, num_key, zv);
			} else {
				zend_hash_update(new_ht, str_key, zv);
			}
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
2835