xref: /PHP-7.3/Zend/zend_hash.c (revision e304468e)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) 1998-2018 Zend Technologies Ltd. (http://www.zend.com) |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 #include "zend.h"
22 #include "zend_globals.h"
23 #include "zend_variables.h"
24 
25 #ifdef __SSE2__
26 # include <mmintrin.h>
27 # include <emmintrin.h>
28 #endif
29 
30 #if ZEND_DEBUG
31 # define HT_ASSERT(ht, expr) \
32 	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
33 #else
34 # define HT_ASSERT(ht, expr)
35 #endif
36 
37 #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
38 
39 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
40 
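/* Explanatory note (added for readability): in debug builds, the low bits of
 * HT_FLAGS(ht) covered by HASH_FLAG_CONSISTENCY record whether a table is live,
 * being destroyed, already destroyed, or being cleaned. IS_CONSISTENT() checks
 * this state on entry to the API and aborts via ZEND_ASSERT() when a table is
 * used after (or during) destruction; in release builds both macros expand to
 * nothing. */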
41 #if ZEND_DEBUG
42 
43 #define HT_OK					0x00
44 #define HT_IS_DESTROYING		0x01
45 #define HT_DESTROYED			0x02
46 #define HT_CLEANING				0x03
47 
48 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
49 {
50 	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
51 		return;
52 	}
53 	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
54 		case HT_IS_DESTROYING:
55 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
56 			break;
57 		case HT_DESTROYED:
58 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
59 			break;
60 		case HT_CLEANING:
61 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
62 			break;
63 		default:
64 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
65 			break;
66 	}
67 	ZEND_ASSERT(0);
68 }
69 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
70 #define SET_INCONSISTENT(n) do { \
71 		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
72 	} while (0)
73 #else
74 #define IS_CONSISTENT(a)
75 #define SET_INCONSISTENT(n)
76 #endif
77 
78 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
79 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
80 		zend_hash_do_resize(ht);					\
81 	}
82 
83 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
84 
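/* Explanatory note (added for readability): rounds the requested size up to a
 * power of two within [HT_MIN_SIZE, HT_MAX_SIZE]; for example, a request for 20
 * slots yields 32. Windows uses BitScanReverse, GCC/Clang use __builtin_clz,
 * and the portable fallback smears the highest set bit rightwards with shift-or
 * steps before adding one. */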
85 static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
86 {
87 #if defined(ZEND_WIN32)
88 	unsigned long index;
89 #endif
90 
91 	/* Use big enough power of 2 */
92 	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
93 	if (nSize <= HT_MIN_SIZE) {
94 		return HT_MIN_SIZE;
95 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
96 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
97 	}
98 
99 #if defined(ZEND_WIN32)
100 	if (BitScanReverse(&index, nSize - 1)) {
101 		return 0x2 << ((31 - index) ^ 0x1f);
102 	} else {
103 		/* nSize is ensured to be in the valid range, fall back to it
104 		   rather than using an undefined bit scan result. */
105 		return nSize;
106 	}
107 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
108 	return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
109 #else
110 	nSize -= 1;
111 	nSize |= (nSize >> 1);
112 	nSize |= (nSize >> 2);
113 	nSize |= (nSize >> 4);
114 	nSize |= (nSize >> 8);
115 	nSize |= (nSize >> 16);
116 	return nSize + 1;
117 #endif
118 }
119 
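/* Explanatory note (added for readability): a "packed" table stores values at
 * their integer offsets in arData and keeps only the minimal two-slot hash
 * header (HT_MIN_MASK), so index lookups need no hashing; a "mixed" table
 * additionally allocates a hash part (2 * nTableSize uint32_t slots located in
 * front of arData) and resolves collisions through the Z_NEXT chains stored in
 * the buckets. */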
120 static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
121 {
122 	HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
123 	HT_FLAGS(ht) |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
124 	HT_HASH_RESET_PACKED(ht);
125 }
126 
127 static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
128 {
129 	uint32_t nSize = ht->nTableSize;
130 
131 	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
132 	HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
133 	HT_FLAGS(ht) |= HASH_FLAG_INITIALIZED;
134 	if (EXPECTED(ht->nTableMask == HT_SIZE_TO_MASK(HT_MIN_SIZE))) {
135 		Bucket *arData = ht->arData;
136 
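		/* For the smallest table (8 buckets, 16 hash slots) the hash part is
		 * filled with HT_INVALID_IDX ((uint32_t)-1) directly: with SSE2 that is
		 * four 128-bit all-ones stores, otherwise sixteen scalar stores. */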
137 #ifdef __SSE2__
138 		__m128i xmm0 = _mm_setzero_si128();
139 		xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
140 		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -16), xmm0);
141 		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -12), xmm0);
142 		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -8), xmm0);
143 		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -4), xmm0);
144 #else
145 		HT_HASH_EX(arData, -16) = -1;
146 		HT_HASH_EX(arData, -15) = -1;
147 		HT_HASH_EX(arData, -14) = -1;
148 		HT_HASH_EX(arData, -13) = -1;
149 		HT_HASH_EX(arData, -12) = -1;
150 		HT_HASH_EX(arData, -11) = -1;
151 		HT_HASH_EX(arData, -10) = -1;
152 		HT_HASH_EX(arData, -9) = -1;
153 		HT_HASH_EX(arData, -8) = -1;
154 		HT_HASH_EX(arData, -7) = -1;
155 		HT_HASH_EX(arData, -6) = -1;
156 		HT_HASH_EX(arData, -5) = -1;
157 		HT_HASH_EX(arData, -4) = -1;
158 		HT_HASH_EX(arData, -3) = -1;
159 		HT_HASH_EX(arData, -2) = -1;
160 		HT_HASH_EX(arData, -1) = -1;
161 #endif
162 	} else {
163 		HT_HASH_RESET(ht);
164 	}
165 }
166 
167 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
168 {
169 	HT_ASSERT_RC1(ht);
170 	ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED));
171 	if (packed) {
172 		zend_hash_real_init_packed_ex(ht);
173 	} else {
174 		zend_hash_real_init_mixed_ex(ht);
175 	}
176 }
177 
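/* Explanatory note (added for readability): uninitialized tables and the shared
 * immutable empty array point their data at this static two-slot hash filled
 * with HT_INVALID_IDX, so lookups simply miss without any allocation until the
 * first insertion forces a real initialization. */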
178 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
179 	{HT_INVALID_IDX, HT_INVALID_IDX};
180 
181 ZEND_API const HashTable zend_empty_array = {
182 	.gc.refcount = 2,
183 	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
184 	.u.flags = HASH_FLAG_STATIC_KEYS,
185 	.nTableMask = HT_MIN_MASK,
186 	.arData = (Bucket*)&uninitialized_bucket[2],
187 	.nNumUsed = 0,
188 	.nNumOfElements = 0,
189 	.nTableSize = HT_MIN_SIZE,
190 	.nInternalPointer = 0,
191 	.nNextFreeElement = 0,
192 	.pDestructor = ZVAL_PTR_DTOR
193 };
194 
195 static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
196 {
197 	GC_SET_REFCOUNT(ht, 1);
198 	GC_TYPE_INFO(ht) = IS_ARRAY | (persistent ? (GC_PERSISTENT << GC_FLAGS_SHIFT) : (GC_COLLECTABLE << GC_FLAGS_SHIFT));
199 	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
200 	ht->nTableMask = HT_MIN_MASK;
201 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
202 	ht->nNumUsed = 0;
203 	ht->nNumOfElements = 0;
204 	ht->nInternalPointer = 0;
205 	ht->nNextFreeElement = 0;
206 	ht->pDestructor = pDestructor;
207 	ht->nTableSize = zend_hash_check_size(nSize);
208 }
209 
210 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
211 {
212 	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
213 }
214 
215 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
216 {
217 	HashTable *ht = emalloc(sizeof(HashTable));
218 	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
219 	return ht;
220 }
221 
222 ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
223 {
224 	HashTable *ht = emalloc(sizeof(HashTable));
225 	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
226 	return ht;
227 }
228 
229 static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
230 {
231 	HT_ASSERT_RC1(ht);
232 	if (ht->nTableSize >= HT_MAX_SIZE) {
233 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
234 	}
235 	ht->nTableSize += ht->nTableSize;
236 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
237 }
238 
239 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
240 {
241 	IS_CONSISTENT(ht);
242 
243 	HT_ASSERT_RC1(ht);
244 	zend_hash_real_init_ex(ht, packed);
245 }
246 
247 ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
248 {
249 	IS_CONSISTENT(ht);
250 
251 	HT_ASSERT_RC1(ht);
252 	zend_hash_real_init_packed_ex(ht);
253 }
254 
255 ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
256 {
257 	IS_CONSISTENT(ht);
258 
259 	HT_ASSERT_RC1(ht);
260 	zend_hash_real_init_mixed_ex(ht);
261 }
262 
263 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
264 {
265 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
266 	Bucket *old_buckets = ht->arData;
267 	uint32_t nSize = ht->nTableSize;
268 
269 	HT_ASSERT_RC1(ht);
270 	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
271 	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
272 	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
273 	HT_SET_DATA_ADDR(ht, new_data);
274 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
275 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
276 	zend_hash_rehash(ht);
277 }
278 
279 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
280 {
281 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
282 	Bucket *old_buckets = ht->arData;
283 
284 	HT_ASSERT_RC1(ht);
285 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
286 	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
287 	ht->nTableMask = HT_MIN_MASK;
288 	HT_SET_DATA_ADDR(ht, new_data);
289 	HT_HASH_RESET_PACKED(ht);
290 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
291 	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
292 }
293 
294 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
295 {
296 	HT_ASSERT_RC1(ht);
297 	if (nSize == 0) return;
298 	if (UNEXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
299 		if (nSize > ht->nTableSize) {
300 			ht->nTableSize = zend_hash_check_size(nSize);
301 		}
302 		zend_hash_real_init(ht, packed);
303 	} else {
304 		if (packed) {
305 			ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
306 			if (nSize > ht->nTableSize) {
307 				ht->nTableSize = zend_hash_check_size(nSize);
308 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
309 			}
310 		} else {
311 			ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
312 			if (nSize > ht->nTableSize) {
313 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
314 				Bucket *old_buckets = ht->arData;
315 				nSize = zend_hash_check_size(nSize);
316 				ht->nTableSize = nSize;
317 				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
318 				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
319 				HT_SET_DATA_ADDR(ht, new_data);
320 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
321 				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
322 				zend_hash_rehash(ht);
323 			}
324 		}
325 	}
326 }
327 
328 ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
329 {
330 	Bucket *p, *end, *arData;
331 	uint32_t nIndex;
332 
333 	arData = ht->arData;
334 	p = arData + ht->nNumUsed;
335 	end = arData + nNumUsed;
336 	ht->nNumUsed = nNumUsed;
337 	while (p != end) {
338 		p--;
339 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
340 		ht->nNumOfElements--;
341 		/* Collision pointers are always directed from higher to lower buckets */
342 		nIndex = p->h | ht->nTableMask;
343 		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
344 	}
345 }
346 
347 static uint32_t zend_array_recalc_elements(HashTable *ht)
348 {
349        zval *val;
350        uint32_t num = ht->nNumOfElements;
351 
352 	   ZEND_HASH_FOREACH_VAL(ht, val) {
353 		   if (Z_TYPE_P(val) == IS_INDIRECT) {
354 			   if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
355 				   num--;
356 			   }
357 		   }
358        } ZEND_HASH_FOREACH_END();
359        return num;
360 }
361 /* }}} */
362 
363 ZEND_API uint32_t zend_array_count(HashTable *ht)
364 {
365 	uint32_t num;
366 	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
367 		num = zend_array_recalc_elements(ht);
368 		if (UNEXPECTED(ht->nNumOfElements == num)) {
369 			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
370 		}
371 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
372 		num = zend_array_recalc_elements(ht);
373 	} else {
374 		num = zend_hash_num_elements(ht);
375 	}
376 	return num;
377 }
378 /* }}} */
379 
380 static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
381 {
382 	while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
383 		pos++;
384 	}
385 	return pos;
386 }
387 
388 static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
389 {
390 	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
391 }
392 
393 ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
394 {
395 	return _zend_hash_get_current_pos(ht);
396 }
397 
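/* Explanatory note (added for readability): position-tracking iterators, used to
 * keep foreach positions stable while a table is being modified, live in the
 * global EG(ht_iterators) array. Each table keeps only a small counter of
 * attached iterators; once that counter saturates (HT_ITERATORS_OVERFLOW) it is
 * no longer maintained and the slower scan of the global array is always taken.
 * The array starts as a small static buffer and is grown on demand in chunks of
 * 8 slots. */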
398 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
399 {
400 	HashTableIterator *iter = EG(ht_iterators);
401 	HashTableIterator *end  = iter + EG(ht_iterators_count);
402 	uint32_t idx;
403 
404 	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
405 		HT_INC_ITERATORS_COUNT(ht);
406 	}
407 	while (iter != end) {
408 		if (iter->ht == NULL) {
409 			iter->ht = ht;
410 			iter->pos = pos;
411 			idx = iter - EG(ht_iterators);
412 			if (idx + 1 > EG(ht_iterators_used)) {
413 				EG(ht_iterators_used) = idx + 1;
414 			}
415 			return idx;
416 		}
417 		iter++;
418 	}
419 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
420 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
421 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
422 	} else {
423 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
424 	}
425 	iter = EG(ht_iterators) + EG(ht_iterators_count);
426 	EG(ht_iterators_count) += 8;
427 	iter->ht = ht;
428 	iter->pos = pos;
429 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
430 	idx = iter - EG(ht_iterators);
431 	EG(ht_iterators_used) = idx + 1;
432 	return idx;
433 }
434 
435 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
436 {
437 	HashTableIterator *iter = EG(ht_iterators) + idx;
438 
439 	ZEND_ASSERT(idx != (uint32_t)-1);
440 	if (UNEXPECTED(iter->ht != ht)) {
441 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
442 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
443 			HT_DEC_ITERATORS_COUNT(iter->ht);
444 		}
445 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
446 			HT_INC_ITERATORS_COUNT(ht);
447 		}
448 		iter->ht = ht;
449 		iter->pos = _zend_hash_get_current_pos(ht);
450 	}
451 	return iter->pos;
452 }
453 
454 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
455 {
456 	HashTable *ht = Z_ARRVAL_P(array);
457 	HashTableIterator *iter = EG(ht_iterators) + idx;
458 
459 	ZEND_ASSERT(idx != (uint32_t)-1);
460 	if (UNEXPECTED(iter->ht != ht)) {
461 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
462 				&& EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
463 			HT_DEC_ITERATORS_COUNT(iter->ht);
464 		}
465 		SEPARATE_ARRAY(array);
466 		ht = Z_ARRVAL_P(array);
467 		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
468 			HT_INC_ITERATORS_COUNT(ht);
469 		}
470 		iter->ht = ht;
471 		iter->pos = _zend_hash_get_current_pos(ht);
472 	}
473 	return iter->pos;
474 }
475 
476 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
477 {
478 	HashTableIterator *iter = EG(ht_iterators) + idx;
479 
480 	ZEND_ASSERT(idx != (uint32_t)-1);
481 
482 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
483 			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
484 		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
485 		HT_DEC_ITERATORS_COUNT(iter->ht);
486 	}
487 	iter->ht = NULL;
488 
489 	if (idx == EG(ht_iterators_used) - 1) {
490 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
491 			idx--;
492 		}
493 		EG(ht_iterators_used) = idx;
494 	}
495 }
496 
497 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
498 {
499 	HashTableIterator *iter = EG(ht_iterators);
500 	HashTableIterator *end  = iter + EG(ht_iterators_used);
501 
502 	while (iter != end) {
503 		if (iter->ht == ht) {
504 			iter->ht = HT_POISONED_PTR;
505 		}
506 		iter++;
507 	}
508 }
509 
510 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
511 {
512 	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
513 		_zend_hash_iterators_remove(ht);
514 	}
515 }
516 
517 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
518 {
519 	HashTableIterator *iter = EG(ht_iterators);
520 	HashTableIterator *end  = iter + EG(ht_iterators_used);
521 	HashPosition res = ht->nNumUsed;
522 
523 	while (iter != end) {
524 		if (iter->ht == ht) {
525 			if (iter->pos >= start && iter->pos < res) {
526 				res = iter->pos;
527 			}
528 		}
529 		iter++;
530 	}
531 	return res;
532 }
533 
534 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
535 {
536 	HashTableIterator *iter = EG(ht_iterators);
537 	HashTableIterator *end  = iter + EG(ht_iterators_used);
538 
539 	while (iter != end) {
540 		if (iter->ht == ht && iter->pos == from) {
541 			iter->pos = to;
542 		}
543 		iter++;
544 	}
545 }
546 
547 ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
548 {
549 	HashTableIterator *iter = EG(ht_iterators);
550 	HashTableIterator *end  = iter + EG(ht_iterators_used);
551 
552 	while (iter != end) {
553 		if (iter->ht == ht) {
554 			iter->pos += step;
555 		}
556 		iter++;
557 	}
558 }
559 
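/* Explanatory note (added for readability): the lookup helpers below compute
 * nIndex = h | nTableMask; because the mask is a negative number, the result is
 * a negative offset into the hash part in front of arData, giving the head of a
 * collision chain that is walked through Z_NEXT(bucket->val). Interned strings
 * allow a pointer-equality fast path before falling back to comparing hash
 * values and string contents. */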
560 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, zend_bool known_hash)
561 {
562 	zend_ulong h;
563 	uint32_t nIndex;
564 	uint32_t idx;
565 	Bucket *p, *arData;
566 
567 	if (known_hash) {
568 		h = ZSTR_H(key);
569 	} else {
570 		h = zend_string_hash_val(key);
571 	}
572 	arData = ht->arData;
573 	nIndex = h | ht->nTableMask;
574 	idx = HT_HASH_EX(arData, nIndex);
575 
576 	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
577 		return NULL;
578 	}
579 	p = HT_HASH_TO_BUCKET_EX(arData, idx);
580 	if (EXPECTED(p->key == key)) { /* check for the same interned string */
581 		return p;
582 	}
583 
584 	while (1) {
585 		if (p->h == ZSTR_H(key) &&
586 		    EXPECTED(p->key) &&
587 		    zend_string_equal_content(p->key, key)) {
588 			return p;
589 		}
590 		idx = Z_NEXT(p->val);
591 		if (idx == HT_INVALID_IDX) {
592 			return NULL;
593 		}
594 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
595 		if (p->key == key) { /* check for the same interned string */
596 			return p;
597 		}
598 	}
599 }
600 
601 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
602 {
603 	uint32_t nIndex;
604 	uint32_t idx;
605 	Bucket *p, *arData;
606 
607 	arData = ht->arData;
608 	nIndex = h | ht->nTableMask;
609 	idx = HT_HASH_EX(arData, nIndex);
610 	while (idx != HT_INVALID_IDX) {
611 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
612 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
613 		if ((p->h == h)
614 			 && p->key
615 			 && (ZSTR_LEN(p->key) == len)
616 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
617 			return p;
618 		}
619 		idx = Z_NEXT(p->val);
620 	}
621 	return NULL;
622 }
623 
624 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
625 {
626 	uint32_t nIndex;
627 	uint32_t idx;
628 	Bucket *p, *arData;
629 
630 	arData = ht->arData;
631 	nIndex = h | ht->nTableMask;
632 	idx = HT_HASH_EX(arData, nIndex);
633 	while (idx != HT_INVALID_IDX) {
634 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
635 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
636 		if (p->h == h && !p->key) {
637 			return p;
638 		}
639 		idx = Z_NEXT(p->val);
640 	}
641 	return NULL;
642 }
643 
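/* Explanatory note (added for readability): common insert/update path for string
 * keys. The flag bits decide the behaviour: HASH_ADD fails if the key already
 * exists, HASH_UPDATE overwrites it, HASH_ADD_NEW skips the existence check
 * entirely (the caller guarantees the key is new), and HASH_UPDATE_INDIRECT
 * follows IS_INDIRECT slots (as used by symbol tables) before deciding.
 * Inserting a string key into a packed or uninitialized table first converts it
 * into a real hash table. */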
644 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
645 {
646 	zend_ulong h;
647 	uint32_t nIndex;
648 	uint32_t idx;
649 	Bucket *p, *arData;
650 
651 	IS_CONSISTENT(ht);
652 	HT_ASSERT_RC1(ht);
653 
654 	if (UNEXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
655 		zend_hash_real_init_mixed(ht);
656 		if (!ZSTR_IS_INTERNED(key)) {
657 			zend_string_addref(key);
658 			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
659 			zend_string_hash_val(key);
660 		}
661 		goto add_to_hash;
662 	} else if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
663 		zend_hash_packed_to_hash(ht);
664 		if (!ZSTR_IS_INTERNED(key)) {
665 			zend_string_addref(key);
666 			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
667 			zend_string_hash_val(key);
668 		}
669 	} else if ((flag & HASH_ADD_NEW) == 0) {
670 		p = zend_hash_find_bucket(ht, key, 0);
671 
672 		if (p) {
673 			zval *data;
674 
675 			if (flag & HASH_ADD) {
676 				if (!(flag & HASH_UPDATE_INDIRECT)) {
677 					return NULL;
678 				}
679 				ZEND_ASSERT(&p->val != pData);
680 				data = &p->val;
681 				if (Z_TYPE_P(data) == IS_INDIRECT) {
682 					data = Z_INDIRECT_P(data);
683 					if (Z_TYPE_P(data) != IS_UNDEF) {
684 						return NULL;
685 					}
686 				} else {
687 					return NULL;
688 				}
689 			} else {
690 				ZEND_ASSERT(&p->val != pData);
691 				data = &p->val;
692 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
693 					data = Z_INDIRECT_P(data);
694 				}
695 			}
696 			if (ht->pDestructor) {
697 				ht->pDestructor(data);
698 			}
699 			ZVAL_COPY_VALUE(data, pData);
700 			return data;
701 		}
702 		if (!ZSTR_IS_INTERNED(key)) {
703 			zend_string_addref(key);
704 			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
705 		}
706 	} else if (!ZSTR_IS_INTERNED(key)) {
707 		zend_string_addref(key);
708 		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
709 		zend_string_hash_val(key);
710 	}
711 
712 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
713 
714 add_to_hash:
715 	idx = ht->nNumUsed++;
716 	ht->nNumOfElements++;
717 	arData = ht->arData;
718 	p = arData + idx;
719 	p->key = key;
720 	p->h = h = ZSTR_H(key);
721 	nIndex = h | ht->nTableMask;
722 	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
723 	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
724 	ZVAL_COPY_VALUE(&p->val, pData);
725 
726 	return &p->val;
727 }
728 
729 static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
730 {
731 	zend_string *key;
732 	uint32_t nIndex;
733 	uint32_t idx;
734 	Bucket *p;
735 
736 	IS_CONSISTENT(ht);
737 	HT_ASSERT_RC1(ht);
738 
739 	if (UNEXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
740 		zend_hash_real_init_mixed(ht);
741 		goto add_to_hash;
742 	} else if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
743 		zend_hash_packed_to_hash(ht);
744 	} else if ((flag & HASH_ADD_NEW) == 0) {
745 		p = zend_hash_str_find_bucket(ht, str, len, h);
746 
747 		if (p) {
748 			zval *data;
749 
750 			if (flag & HASH_ADD) {
751 				if (!(flag & HASH_UPDATE_INDIRECT)) {
752 					return NULL;
753 				}
754 				ZEND_ASSERT(&p->val != pData);
755 				data = &p->val;
756 				if (Z_TYPE_P(data) == IS_INDIRECT) {
757 					data = Z_INDIRECT_P(data);
758 					if (Z_TYPE_P(data) != IS_UNDEF) {
759 						return NULL;
760 					}
761 				} else {
762 					return NULL;
763 				}
764 			} else {
765 				ZEND_ASSERT(&p->val != pData);
766 				data = &p->val;
767 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
768 					data = Z_INDIRECT_P(data);
769 				}
770 			}
771 			if (ht->pDestructor) {
772 				ht->pDestructor(data);
773 			}
774 			ZVAL_COPY_VALUE(data, pData);
775 			return data;
776 		}
777 	}
778 
779 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
780 
781 add_to_hash:
782 	idx = ht->nNumUsed++;
783 	ht->nNumOfElements++;
784 	p = ht->arData + idx;
785 	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
786 	p->h = ZSTR_H(key) = h;
787 	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
788 	ZVAL_COPY_VALUE(&p->val, pData);
789 	nIndex = h | ht->nTableMask;
790 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
791 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
792 
793 	return &p->val;
794 }
795 
796 ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
797 {
798 	if (flag == HASH_ADD) {
799 		return zend_hash_add(ht, key, pData);
800 	} else if (flag == HASH_ADD_NEW) {
801 		return zend_hash_add_new(ht, key, pData);
802 	} else if (flag == HASH_UPDATE) {
803 		return zend_hash_update(ht, key, pData);
804 	} else {
805 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
806 		return zend_hash_update_ind(ht, key, pData);
807 	}
808 }
809 
810 ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
811 {
812 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
813 }
814 
815 ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
816 {
817 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
818 }
819 
820 ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
821 {
822 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
823 }
824 
825 ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
826 {
827 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
828 }
829 
830 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
831 {
832 	if (flag == HASH_ADD) {
833 		return zend_hash_str_add(ht, str, len, pData);
834 	} else if (flag == HASH_ADD_NEW) {
835 		return zend_hash_str_add_new(ht, str, len, pData);
836 	} else if (flag == HASH_UPDATE) {
837 		return zend_hash_str_update(ht, str, len, pData);
838 	} else {
839 		ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
840 		return zend_hash_str_update_ind(ht, str, len, pData);
841 	}
842 }
843 
844 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
845 {
846 	zend_ulong h = zend_hash_func(str, len);
847 
848 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
849 }
850 
851 ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
852 {
853 	zend_ulong h = zend_hash_func(str, len);
854 
855 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
856 }
857 
858 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
859 {
860 	zend_ulong h = zend_hash_func(str, len);
861 
862 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
863 }
864 
865 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
866 {
867 	zend_ulong h = zend_hash_func(str, len);
868 
869 	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
870 }
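/* Illustrative use only (not part of this file): from extension code, storing a
 * zval under a string key typically looks like the sketch below, assuming `ht`
 * is an initialized table the caller is allowed to modify:
 *
 *     zval tmp;
 *     ZVAL_LONG(&tmp, 42);
 *     zend_hash_str_update(ht, "answer", sizeof("answer") - 1, &tmp);
 *
 * The zval is copied by value; the table takes over ownership of any refcounted
 * payload. */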
871 
872 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
873 {
874 	zval dummy;
875 
876 	ZVAL_NULL(&dummy);
877 	return zend_hash_index_add(ht, h, &dummy);
878 }
879 
880 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
881 {
882 	zval dummy;
883 
884 	ZVAL_NULL(&dummy);
885 	return zend_hash_add(ht, key, &dummy);
886 }
887 
888 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
889 {
890 	zval dummy;
891 
892 	ZVAL_NULL(&dummy);
893 	return zend_hash_str_add(ht, str, len, &dummy);
894 }
895 
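/* Explanatory note (added for readability): insert/update path for integer keys.
 * Packed tables are handled in place as long as the index stays close to the
 * used range: writing at or just past the end extends the array, writing a bit
 * further ((h >> 1) < nTableSize while the table is more than half full) grows
 * the packed allocation, and anything sparser falls back to converting the
 * table into a real hash. */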
896 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
897 {
898 	uint32_t nIndex;
899 	uint32_t idx;
900 	Bucket *p;
901 
902 	IS_CONSISTENT(ht);
903 	HT_ASSERT_RC1(ht);
904 
905 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
906 		if (h < ht->nNumUsed) {
907 			p = ht->arData + h;
908 			if (Z_TYPE(p->val) != IS_UNDEF) {
909 replace:
910 				if (flag & HASH_ADD) {
911 					return NULL;
912 				}
913 				if (ht->pDestructor) {
914 					ht->pDestructor(&p->val);
915 				}
916 				ZVAL_COPY_VALUE(&p->val, pData);
917 				return &p->val;
918 			} else { /* we have to keep the order :( */
919 				goto convert_to_hash;
920 			}
921 		} else if (EXPECTED(h < ht->nTableSize)) {
922 add_to_packed:
923 			p = ht->arData + h;
924 			/* incremental initialization of empty Buckets */
925 			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
926 				if (h > ht->nNumUsed) {
927 					Bucket *q = ht->arData + ht->nNumUsed;
928 					while (q != p) {
929 						ZVAL_UNDEF(&q->val);
930 						q++;
931 					}
932 				}
933 			}
934 			ht->nNextFreeElement = ht->nNumUsed = h + 1;
935 			goto add;
936 		} else if ((h >> 1) < ht->nTableSize &&
937 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
938 			zend_hash_packed_grow(ht);
939 			goto add_to_packed;
940 		} else {
941 			if (ht->nNumUsed >= ht->nTableSize) {
942 				ht->nTableSize += ht->nTableSize;
943 			}
944 convert_to_hash:
945 			zend_hash_packed_to_hash(ht);
946 		}
947 	} else if (!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED)) {
948 		if (h < ht->nTableSize) {
949 			zend_hash_real_init_packed_ex(ht);
950 			goto add_to_packed;
951 		}
952 		zend_hash_real_init_mixed(ht);
953 	} else {
954 		if ((flag & HASH_ADD_NEW) == 0) {
955 			p = zend_hash_index_find_bucket(ht, h);
956 			if (p) {
957 				goto replace;
958 			}
959 		}
960 		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
961 	}
962 
963 	idx = ht->nNumUsed++;
964 	nIndex = h | ht->nTableMask;
965 	p = ht->arData + idx;
966 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
967 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
968 	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
969 		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
970 	}
971 add:
972 	ht->nNumOfElements++;
973 	p->h = h;
974 	p->key = NULL;
975 	ZVAL_COPY_VALUE(&p->val, pData);
976 
977 	return &p->val;
978 }
979 
980 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
981 {
982 	if (flag == HASH_ADD) {
983 		return zend_hash_index_add(ht, h, pData);
984 	} else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
985 		return zend_hash_index_add_new(ht, h, pData);
986 	} else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
987 		ZEND_ASSERT(h == ht->nNextFreeElement);
988 		return zend_hash_next_index_insert(ht, pData);
989 	} else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
990 		ZEND_ASSERT(h == ht->nNextFreeElement);
991 		return zend_hash_next_index_insert_new(ht, pData);
992 	} else {
993 		ZEND_ASSERT(flag == HASH_UPDATE);
994 		return zend_hash_index_update(ht, h, pData);
995 	}
996 }
997 
998 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
999 {
1000 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
1001 }
1002 
1003 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
1004 {
1005 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
1006 }
1007 
1008 ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
1009 {
1010 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
1011 }
1012 
1013 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
1014 {
1015 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
1016 }
1017 
1018 ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
1019 {
1020 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
1021 }
1022 
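/* Explanatory note (added for readability): called when nNumUsed reaches
 * nTableSize. If the number of holes (IS_UNDEF tombstones left by deletions)
 * exceeds 1/32 of the live elements, the table is compacted in place by
 * zend_hash_rehash(); otherwise the allocation is doubled and all buckets are
 * re-linked into the new, larger hash part. */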
1023 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
1024 {
1025 
1026 	IS_CONSISTENT(ht);
1027 	HT_ASSERT_RC1(ht);
1028 
1029 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
1030 		zend_hash_rehash(ht);
1031 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
1032 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
1033 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
1034 		Bucket *old_buckets = ht->arData;
1035 
1036 		ht->nTableSize = nSize;
1037 		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1038 		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
1039 		HT_SET_DATA_ADDR(ht, new_data);
1040 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
1041 		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1042 		zend_hash_rehash(ht);
1043 	} else {
1044 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
1045 	}
1046 }
1047 
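/* Explanatory note (added for readability): rebuilds the hash part from the
 * bucket array. Every hash slot is reset to HT_INVALID_IDX and each live bucket
 * is linked back in; when holes are present the buckets are simultaneously
 * compacted towards the start of arData, and the internal pointer plus any
 * active external iterators are remapped from old to new positions as the
 * compaction proceeds. */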
1048 ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
1049 {
1050 	Bucket *p;
1051 	uint32_t nIndex, i;
1052 
1053 	IS_CONSISTENT(ht);
1054 
1055 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
1056 		if (HT_FLAGS(ht) & HASH_FLAG_INITIALIZED) {
1057 			ht->nNumUsed = 0;
1058 			HT_HASH_RESET(ht);
1059 		}
1060 		return SUCCESS;
1061 	}
1062 
1063 	HT_HASH_RESET(ht);
1064 	i = 0;
1065 	p = ht->arData;
1066 	if (HT_IS_WITHOUT_HOLES(ht)) {
1067 		do {
1068 			nIndex = p->h | ht->nTableMask;
1069 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1070 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1071 			p++;
1072 		} while (++i < ht->nNumUsed);
1073 	} else {
1074 		uint32_t old_num_used = ht->nNumUsed;
1075 		do {
1076 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
1077 				uint32_t j = i;
1078 				Bucket *q = p;
1079 
1080 				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
1081 					while (++i < ht->nNumUsed) {
1082 						p++;
1083 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1084 							ZVAL_COPY_VALUE(&q->val, &p->val);
1085 							q->h = p->h;
1086 							nIndex = q->h | ht->nTableMask;
1087 							q->key = p->key;
1088 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1089 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1090 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1091 								ht->nInternalPointer = j;
1092 							}
1093 							q++;
1094 							j++;
1095 						}
1096 					}
1097 				} else {
1098 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
1099 
1100 					while (++i < ht->nNumUsed) {
1101 						p++;
1102 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
1103 							ZVAL_COPY_VALUE(&q->val, &p->val);
1104 							q->h = p->h;
1105 							nIndex = q->h | ht->nTableMask;
1106 							q->key = p->key;
1107 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
1108 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
1109 							if (UNEXPECTED(ht->nInternalPointer == i)) {
1110 								ht->nInternalPointer = j;
1111 							}
1112 							if (UNEXPECTED(i >= iter_pos)) {
1113 								do {
1114 									zend_hash_iterators_update(ht, iter_pos, j);
1115 									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
1116 								} while (iter_pos < i);
1117 							}
1118 							q++;
1119 							j++;
1120 						}
1121 					}
1122 				}
1123 				ht->nNumUsed = j;
1124 				break;
1125 			}
1126 			nIndex = p->h | ht->nTableMask;
1127 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
1128 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
1129 			p++;
1130 		} while (++i < ht->nNumUsed);
1131 
1132 		/* Migrate iterators that pointed one past the old end of the array to the
1133 		 * new end, so that newly inserted elements are picked up correctly. */
1134 		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1135 			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
1136 		}
1137 	}
1138 	return SUCCESS;
1139 }
1140 
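/* Explanatory note (added for readability): core deletion. Unlink the bucket
 * from its collision chain (packed tables have no chains), mark the slot
 * IS_UNDEF as a tombstone, advance the internal pointer and any iterators that
 * were parked on it, and shrink nNumUsed when the deleted element was at the
 * tail. The value is copied aside before the destructor runs so that a
 * re-entrant destructor never sees a half-freed slot. */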
1141 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
1142 {
1143 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1144 		if (prev) {
1145 			Z_NEXT(prev->val) = Z_NEXT(p->val);
1146 		} else {
1147 			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
1148 		}
1149 	}
1150 	idx = HT_HASH_TO_IDX(idx);
1151 	ht->nNumOfElements--;
1152 	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
1153 		uint32_t new_idx;
1154 
1155 		new_idx = idx;
1156 		while (1) {
1157 			new_idx++;
1158 			if (new_idx >= ht->nNumUsed) {
1159 				break;
1160 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1161 				break;
1162 			}
1163 		}
1164 		if (ht->nInternalPointer == idx) {
1165 			ht->nInternalPointer = new_idx;
1166 		}
1167 		zend_hash_iterators_update(ht, idx, new_idx);
1168 	}
1169 	if (ht->nNumUsed - 1 == idx) {
1170 		do {
1171 			ht->nNumUsed--;
1172 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
1173 		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
1174 	}
1175 	if (p->key) {
1176 		zend_string_release(p->key);
1177 	}
1178 	if (ht->pDestructor) {
1179 		zval tmp;
1180 		ZVAL_COPY_VALUE(&tmp, &p->val);
1181 		ZVAL_UNDEF(&p->val);
1182 		ht->pDestructor(&tmp);
1183 	} else {
1184 		ZVAL_UNDEF(&p->val);
1185 	}
1186 }
1187 
1188 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1189 {
1190 	Bucket *prev = NULL;
1191 
1192 	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1193 		uint32_t nIndex = p->h | ht->nTableMask;
1194 		uint32_t i = HT_HASH(ht, nIndex);
1195 
1196 		if (i != idx) {
1197 			prev = HT_HASH_TO_BUCKET(ht, i);
1198 			while (Z_NEXT(prev->val) != idx) {
1199 				i = Z_NEXT(prev->val);
1200 				prev = HT_HASH_TO_BUCKET(ht, i);
1201 			}
1202 		}
1203 	}
1204 
1205 	_zend_hash_del_el_ex(ht, idx, p, prev);
1206 }
1207 
1208 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1209 {
1210 	IS_CONSISTENT(ht);
1211 	HT_ASSERT_RC1(ht);
1212 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1213 }
1214 
1215 ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1216 {
1217 	zend_ulong h;
1218 	uint32_t nIndex;
1219 	uint32_t idx;
1220 	Bucket *p;
1221 	Bucket *prev = NULL;
1222 
1223 	IS_CONSISTENT(ht);
1224 	HT_ASSERT_RC1(ht);
1225 
1226 	h = zend_string_hash_val(key);
1227 	nIndex = h | ht->nTableMask;
1228 
1229 	idx = HT_HASH(ht, nIndex);
1230 	while (idx != HT_INVALID_IDX) {
1231 		p = HT_HASH_TO_BUCKET(ht, idx);
1232 		if ((p->key == key) ||
1233 			(p->h == h &&
1234 		     p->key &&
1235 		     zend_string_equal_content(p->key, key))) {
1236 			_zend_hash_del_el_ex(ht, idx, p, prev);
1237 			return SUCCESS;
1238 		}
1239 		prev = p;
1240 		idx = Z_NEXT(p->val);
1241 	}
1242 	return FAILURE;
1243 }
1244 
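/* Explanatory note (added for readability): zend_hash_del_ind() differs from
 * zend_hash_del() for IS_INDIRECT slots (as used by symbol tables): the
 * referenced value is destroyed and set to UNDEF while the bucket itself stays
 * in place, and HASH_FLAG_HAS_EMPTY_IND is raised so element counting knows to
 * skip such emptied slots. */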
1245 ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1246 {
1247 	zend_ulong h;
1248 	uint32_t nIndex;
1249 	uint32_t idx;
1250 	Bucket *p;
1251 	Bucket *prev = NULL;
1252 
1253 	IS_CONSISTENT(ht);
1254 	HT_ASSERT_RC1(ht);
1255 
1256 	h = zend_string_hash_val(key);
1257 	nIndex = h | ht->nTableMask;
1258 
1259 	idx = HT_HASH(ht, nIndex);
1260 	while (idx != HT_INVALID_IDX) {
1261 		p = HT_HASH_TO_BUCKET(ht, idx);
1262 		if ((p->key == key) ||
1263 			(p->h == h &&
1264 		     p->key &&
1265 		     zend_string_equal_content(p->key, key))) {
1266 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1267 				zval *data = Z_INDIRECT(p->val);
1268 
1269 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1270 					return FAILURE;
1271 				} else {
1272 					if (ht->pDestructor) {
1273 						zval tmp;
1274 						ZVAL_COPY_VALUE(&tmp, data);
1275 						ZVAL_UNDEF(data);
1276 						ht->pDestructor(&tmp);
1277 					} else {
1278 						ZVAL_UNDEF(data);
1279 					}
1280 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1281 				}
1282 			} else {
1283 				_zend_hash_del_el_ex(ht, idx, p, prev);
1284 			}
1285 			return SUCCESS;
1286 		}
1287 		prev = p;
1288 		idx = Z_NEXT(p->val);
1289 	}
1290 	return FAILURE;
1291 }
1292 
1293 ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1294 {
1295 	zend_ulong h;
1296 	uint32_t nIndex;
1297 	uint32_t idx;
1298 	Bucket *p;
1299 	Bucket *prev = NULL;
1300 
1301 	IS_CONSISTENT(ht);
1302 	HT_ASSERT_RC1(ht);
1303 
1304 	h = zend_inline_hash_func(str, len);
1305 	nIndex = h | ht->nTableMask;
1306 
1307 	idx = HT_HASH(ht, nIndex);
1308 	while (idx != HT_INVALID_IDX) {
1309 		p = HT_HASH_TO_BUCKET(ht, idx);
1310 		if ((p->h == h)
1311 			 && p->key
1312 			 && (ZSTR_LEN(p->key) == len)
1313 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1314 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1315 				zval *data = Z_INDIRECT(p->val);
1316 
1317 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1318 					return FAILURE;
1319 				} else {
1320 					if (ht->pDestructor) {
1321 						ht->pDestructor(data);
1322 					}
1323 					ZVAL_UNDEF(data);
1324 					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
1325 				}
1326 			} else {
1327 				_zend_hash_del_el_ex(ht, idx, p, prev);
1328 			}
1329 			return SUCCESS;
1330 		}
1331 		prev = p;
1332 		idx = Z_NEXT(p->val);
1333 	}
1334 	return FAILURE;
1335 }
1336 
1337 ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1338 {
1339 	zend_ulong h;
1340 	uint32_t nIndex;
1341 	uint32_t idx;
1342 	Bucket *p;
1343 	Bucket *prev = NULL;
1344 
1345 	IS_CONSISTENT(ht);
1346 	HT_ASSERT_RC1(ht);
1347 
1348 	h = zend_inline_hash_func(str, len);
1349 	nIndex = h | ht->nTableMask;
1350 
1351 	idx = HT_HASH(ht, nIndex);
1352 	while (idx != HT_INVALID_IDX) {
1353 		p = HT_HASH_TO_BUCKET(ht, idx);
1354 		if ((p->h == h)
1355 			 && p->key
1356 			 && (ZSTR_LEN(p->key) == len)
1357 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1358 			_zend_hash_del_el_ex(ht, idx, p, prev);
1359 			return SUCCESS;
1360 		}
1361 		prev = p;
1362 		idx = Z_NEXT(p->val);
1363 	}
1364 	return FAILURE;
1365 }
1366 
1367 ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1368 {
1369 	uint32_t nIndex;
1370 	uint32_t idx;
1371 	Bucket *p;
1372 	Bucket *prev = NULL;
1373 
1374 	IS_CONSISTENT(ht);
1375 	HT_ASSERT_RC1(ht);
1376 
1377 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
1378 		if (h < ht->nNumUsed) {
1379 			p = ht->arData + h;
1380 			if (Z_TYPE(p->val) != IS_UNDEF) {
1381 				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1382 				return SUCCESS;
1383 			}
1384 		}
1385 		return FAILURE;
1386 	}
1387 	nIndex = h | ht->nTableMask;
1388 
1389 	idx = HT_HASH(ht, nIndex);
1390 	while (idx != HT_INVALID_IDX) {
1391 		p = HT_HASH_TO_BUCKET(ht, idx);
1392 		if ((p->h == h) && (p->key == NULL)) {
1393 			_zend_hash_del_el_ex(ht, idx, p, prev);
1394 			return SUCCESS;
1395 		}
1396 		prev = p;
1397 		idx = Z_NEXT(p->val);
1398 	}
1399 	return FAILURE;
1400 }
1401 
1402 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1403 {
1404 	Bucket *p, *end;
1405 
1406 	IS_CONSISTENT(ht);
1407 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1408 
1409 	if (ht->nNumUsed) {
1410 		p = ht->arData;
1411 		end = p + ht->nNumUsed;
1412 		if (ht->pDestructor) {
1413 			SET_INCONSISTENT(HT_IS_DESTROYING);
1414 
1415 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1416 				if (HT_IS_WITHOUT_HOLES(ht)) {
1417 					do {
1418 						ht->pDestructor(&p->val);
1419 					} while (++p != end);
1420 				} else {
1421 					do {
1422 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1423 							ht->pDestructor(&p->val);
1424 						}
1425 					} while (++p != end);
1426 				}
1427 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1428 				do {
1429 					ht->pDestructor(&p->val);
1430 					if (EXPECTED(p->key)) {
1431 						zend_string_release(p->key);
1432 					}
1433 				} while (++p != end);
1434 			} else {
1435 				do {
1436 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1437 						ht->pDestructor(&p->val);
1438 						if (EXPECTED(p->key)) {
1439 							zend_string_release(p->key);
1440 						}
1441 					}
1442 				} while (++p != end);
1443 			}
1444 
1445 			SET_INCONSISTENT(HT_DESTROYED);
1446 		} else {
1447 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1448 				do {
1449 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1450 						if (EXPECTED(p->key)) {
1451 							zend_string_release(p->key);
1452 						}
1453 					}
1454 				} while (++p != end);
1455 			}
1456 		}
1457 		zend_hash_iterators_remove(ht);
1458 	} else if (EXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
1459 		return;
1460 	}
1461 	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1462 }
1463 
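/* Explanatory note (added for readability): zend_array_destroy() is the fast
 * path for ordinary PHP arrays. It assumes the default ZVAL_PTR_DTOR destructor
 * (falling back to zend_hash_destroy() if the destructor was overridden),
 * detaches the table from the GC buffer, destroys the values inline and finally
 * frees the HashTable itself, which zend_hash_destroy() deliberately does not
 * do. */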
1464 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1465 {
1466 	Bucket *p, *end;
1467 
1468 	IS_CONSISTENT(ht);
1469 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1470 
1471 	/* break possible cycles */
1472 	GC_REMOVE_FROM_BUFFER(ht);
1473 	GC_TYPE_INFO(ht) = IS_NULL /*???| (GC_WHITE << 16)*/;
1474 
1475 	if (ht->nNumUsed) {
1476 		/* In some rare cases destructors of regular arrays may be changed */
1477 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1478 			zend_hash_destroy(ht);
1479 			goto free_ht;
1480 		}
1481 
1482 		p = ht->arData;
1483 		end = p + ht->nNumUsed;
1484 		SET_INCONSISTENT(HT_IS_DESTROYING);
1485 
1486 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1487 			do {
1488 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1489 			} while (++p != end);
1490 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1491 			do {
1492 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1493 				if (EXPECTED(p->key)) {
1494 					zend_string_release_ex(p->key, 0);
1495 				}
1496 			} while (++p != end);
1497 		} else {
1498 			do {
1499 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1500 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1501 					if (EXPECTED(p->key)) {
1502 						zend_string_release_ex(p->key, 0);
1503 					}
1504 				}
1505 			} while (++p != end);
1506 		}
1507 	} else if (EXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
1508 		goto free_ht;
1509 	}
1510 	zend_hash_iterators_remove(ht);
1511 	SET_INCONSISTENT(HT_DESTROYED);
1512 	efree(HT_GET_DATA_ADDR(ht));
1513 free_ht:
1514 	FREE_HASHTABLE(ht);
1515 }
1516 
1517 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1518 {
1519 	Bucket *p, *end;
1520 
1521 	IS_CONSISTENT(ht);
1522 	HT_ASSERT_RC1(ht);
1523 
1524 	if (ht->nNumUsed) {
1525 		p = ht->arData;
1526 		end = p + ht->nNumUsed;
1527 		if (ht->pDestructor) {
1528 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1529 				if (HT_IS_WITHOUT_HOLES(ht)) {
1530 					do {
1531 						ht->pDestructor(&p->val);
1532 					} while (++p != end);
1533 				} else {
1534 					do {
1535 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1536 							ht->pDestructor(&p->val);
1537 						}
1538 					} while (++p != end);
1539 				}
1540 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1541 				do {
1542 					ht->pDestructor(&p->val);
1543 					if (EXPECTED(p->key)) {
1544 						zend_string_release(p->key);
1545 					}
1546 				} while (++p != end);
1547 			} else {
1548 				do {
1549 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1550 						ht->pDestructor(&p->val);
1551 						if (EXPECTED(p->key)) {
1552 							zend_string_release(p->key);
1553 						}
1554 					}
1555 				} while (++p != end);
1556 			}
1557 		} else {
1558 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1559 				if (HT_IS_WITHOUT_HOLES(ht)) {
1560 					do {
1561 						if (EXPECTED(p->key)) {
1562 							zend_string_release(p->key);
1563 						}
1564 					} while (++p != end);
1565 				} else {
1566 					do {
1567 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1568 							if (EXPECTED(p->key)) {
1569 								zend_string_release(p->key);
1570 							}
1571 						}
1572 					} while (++p != end);
1573 				}
1574 			}
1575 		}
1576 		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
1577 			HT_HASH_RESET(ht);
1578 		}
1579 	}
1580 	ht->nNumUsed = 0;
1581 	ht->nNumOfElements = 0;
1582 	ht->nNextFreeElement = 0;
1583 	ht->nInternalPointer = 0;
1584 }
1585 
1586 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1587 {
1588 	Bucket *p, *end;
1589 
1590 	IS_CONSISTENT(ht);
1591 	HT_ASSERT_RC1(ht);
1592 
1593 	if (ht->nNumUsed) {
1594 		p = ht->arData;
1595 		end = p + ht->nNumUsed;
1596 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1597 			do {
1598 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1599 			} while (++p != end);
1600 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1601 			do {
1602 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1603 				if (EXPECTED(p->key)) {
1604 					zend_string_release(p->key);
1605 				}
1606 			} while (++p != end);
1607 		} else {
1608 			do {
1609 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1610 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1611 					if (EXPECTED(p->key)) {
1612 						zend_string_release(p->key);
1613 					}
1614 				}
1615 			} while (++p != end);
1616 		}
1617 		HT_HASH_RESET(ht);
1618 	}
1619 	ht->nNumUsed = 0;
1620 	ht->nNumOfElements = 0;
1621 	ht->nNextFreeElement = 0;
1622 	ht->nInternalPointer = 0;
1623 }
1624 
1625 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1626 {
1627 	uint32_t idx;
1628 	Bucket *p;
1629 
1630 	IS_CONSISTENT(ht);
1631 	HT_ASSERT_RC1(ht);
1632 
1633 	p = ht->arData;
1634 	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1635 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1636 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1637 	}
1638 	if (HT_FLAGS(ht) & HASH_FLAG_INITIALIZED) {
1639 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1640 	}
1641 
1642 	SET_INCONSISTENT(HT_DESTROYED);
1643 }
1644 
1645 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1646 {
1647 	uint32_t idx;
1648 	Bucket *p;
1649 
1650 	IS_CONSISTENT(ht);
1651 	HT_ASSERT_RC1(ht);
1652 
1653 	idx = ht->nNumUsed;
1654 	p = ht->arData + ht->nNumUsed;
1655 	while (idx > 0) {
1656 		idx--;
1657 		p--;
1658 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1659 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1660 	}
1661 
1662 	if (HT_FLAGS(ht) & HASH_FLAG_INITIALIZED) {
1663 		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
1664 	}
1665 
1666 	SET_INCONSISTENT(HT_DESTROYED);
1667 }
1668 
1669 /* This is used to iterate over the elements of a hashtable and selectively
1670  * delete certain entries. apply_func() receives the data of each entry and
1671  * decides whether the entry should be deleted or iteration should be stopped.
1672  * The following three return codes are possible:
1673  * ZEND_HASH_APPLY_KEEP   - continue
1674  * ZEND_HASH_APPLY_STOP   - stop iteration
1675  * ZEND_HASH_APPLY_REMOVE - delete the element, combineable with the former
1676  */
1677 
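/* Illustrative callback only (not part of this file): dropping all null values
 * from a table could look like this:
 *
 *     static int remove_nulls(zval *val)
 *     {
 *         return Z_TYPE_P(val) == IS_NULL ? ZEND_HASH_APPLY_REMOVE
 *                                         : ZEND_HASH_APPLY_KEEP;
 *     }
 *
 *     zend_hash_apply(ht, remove_nulls);
 */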
1678 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1679 {
1680 	uint32_t idx;
1681 	Bucket *p;
1682 	int result;
1683 
1684 	IS_CONSISTENT(ht);
1685 
1686 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1687 		p = ht->arData + idx;
1688 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1689 		result = apply_func(&p->val);
1690 
1691 		if (result & ZEND_HASH_APPLY_REMOVE) {
1692 			HT_ASSERT_RC1(ht);
1693 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1694 		}
1695 		if (result & ZEND_HASH_APPLY_STOP) {
1696 			break;
1697 		}
1698 	}
1699 }
1700 
1701 
1702 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1703 {
1704 	uint32_t idx;
1705 	Bucket *p;
1706 	int result;
1707 
1708 	IS_CONSISTENT(ht);
1709 
1710 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1711 		p = ht->arData + idx;
1712 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1713 		result = apply_func(&p->val, argument);
1714 
1715 		if (result & ZEND_HASH_APPLY_REMOVE) {
1716 			HT_ASSERT_RC1(ht);
1717 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1718 		}
1719 		if (result & ZEND_HASH_APPLY_STOP) {
1720 			break;
1721 		}
1722 	}
1723 }
1724 
1725 
1726 ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1727 {
1728 	uint32_t idx;
1729 	Bucket *p;
1730 	va_list args;
1731 	zend_hash_key hash_key;
1732 	int result;
1733 
1734 	IS_CONSISTENT(ht);
1735 
1736 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1737 		p = ht->arData + idx;
1738 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1739 		va_start(args, num_args);
1740 		hash_key.h = p->h;
1741 		hash_key.key = p->key;
1742 
1743 		result = apply_func(&p->val, num_args, args, &hash_key);
1744 
1745 		if (result & ZEND_HASH_APPLY_REMOVE) {
1746 			HT_ASSERT_RC1(ht);
1747 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1748 		}
1749 		if (result & ZEND_HASH_APPLY_STOP) {
1750 			va_end(args);
1751 			break;
1752 		}
1753 		va_end(args);
1754 	}
1755 }
1756 
1757 
1758 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1759 {
1760 	uint32_t idx;
1761 	Bucket *p;
1762 	int result;
1763 
1764 	IS_CONSISTENT(ht);
1765 
1766 	idx = ht->nNumUsed;
1767 	while (idx > 0) {
1768 		idx--;
1769 		p = ht->arData + idx;
1770 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1771 
1772 		result = apply_func(&p->val);
1773 
1774 		if (result & ZEND_HASH_APPLY_REMOVE) {
1775 			HT_ASSERT_RC1(ht);
1776 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1777 		}
1778 		if (result & ZEND_HASH_APPLY_STOP) {
1779 			break;
1780 		}
1781 	}
1782 }
1783 
1784 
1785 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1786 {
1787 	uint32_t idx;
1788 	Bucket *p;
1789 	zval *new_entry, *data;
1790 
1791 	IS_CONSISTENT(source);
1792 	IS_CONSISTENT(target);
1793 	HT_ASSERT_RC1(target);
1794 
1795 	for (idx = 0; idx < source->nNumUsed; idx++) {
1796 		p = source->arData + idx;
1797 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1798 
1799 		/* INDIRECT element may point to UNDEF-ined slots */
1800 		data = &p->val;
1801 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1802 			data = Z_INDIRECT_P(data);
1803 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1804 				continue;
1805 			}
1806 		}
1807 		if (p->key) {
1808 			new_entry = zend_hash_update(target, p->key, data);
1809 		} else {
1810 			new_entry = zend_hash_index_update(target, p->h, data);
1811 		}
1812 		if (pCopyConstructor) {
1813 			pCopyConstructor(new_entry);
1814 		}
1815 	}
1816 }
1817 
1818 
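/* Helper for zend_array_dup(): copies a single bucket from source to target.
 * The packed/static_keys/with_holes arguments are constants at every call
 * site, so this always-inline helper is specialized into several tight copy
 * loops. Returns 1 if the element was copied and 0 if it was skipped
 * (an UNDEF slot, or an INDIRECT element pointing to an UNDEF slot). */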
1819 static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
1820 {
1821 	zval *data = &p->val;
1822 
1823 	if (with_holes) {
1824 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1825 			data = Z_INDIRECT_P(data);
1826 		}
1827 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1828 			return 0;
1829 		}
1830 	} else if (!packed) {
1831 		/* INDIRECT element may point to UNDEF-ined slots */
1832 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1833 			data = Z_INDIRECT_P(data);
1834 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1835 				return 0;
1836 			}
1837 		}
1838 	}
1839 
1840 	do {
1841 		if (Z_OPT_REFCOUNTED_P(data)) {
1842 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
1843 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
1844 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
1845 				data = Z_REFVAL_P(data);
1846 				if (!Z_OPT_REFCOUNTED_P(data)) {
1847 					break;
1848 				}
1849 			}
1850 			Z_ADDREF_P(data);
1851 		}
1852 	} while (0);
1853 	ZVAL_COPY_VALUE(&q->val, data);
1854 
1855 	q->h = p->h;
1856 	if (packed) {
1857 		q->key = NULL;
1858 	} else {
1859 		uint32_t nIndex;
1860 
1861 		q->key = p->key;
1862 		if (!static_keys && q->key) {
1863 			zend_string_addref(q->key);
1864 		}
1865 
1866 		nIndex = q->h | target->nTableMask;
1867 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
1868 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
1869 	}
1870 	return 1;
1871 }
1872 
1873 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
1874 {
1875 	Bucket *p = source->arData;
1876 	Bucket *q = target->arData;
1877 	Bucket *end = p + source->nNumUsed;
1878 
1879 	do {
1880 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
1881 			if (with_holes) {
1882 				ZVAL_UNDEF(&q->val);
1883 			}
1884 		}
1885 		p++; q++;
1886 	} while (p != end);
1887 }
1888 
1889 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
1890 {
1891 	uint32_t idx = 0;
1892 	Bucket *p = source->arData;
1893 	Bucket *q = target->arData;
1894 	Bucket *end = p + source->nNumUsed;
1895 
1896 	do {
1897 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
1898 			uint32_t target_idx = idx;
1899 
1900 			idx++; p++;
1901 			while (p != end) {
1902 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
1903 					if (source->nInternalPointer == idx) {
1904 						target->nInternalPointer = target_idx;
1905 					}
1906 					target_idx++; q++;
1907 				}
1908 				idx++; p++;
1909 			}
1910 			return target_idx;
1911 		}
1912 		idx++; p++; q++;
1913 	} while (p != end);
1914 	return idx;
1915 }
1916 
1917 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
1918 {
1919 	uint32_t idx;
1920 	HashTable *target;
1921 
1922 	IS_CONSISTENT(source);
1923 
1924 	ALLOC_HASHTABLE(target);
1925 	GC_SET_REFCOUNT(target, 1);
1926 	GC_TYPE_INFO(target) = IS_ARRAY | (GC_COLLECTABLE << GC_FLAGS_SHIFT);
1927 
1928 	target->nTableSize = source->nTableSize;
1929 	target->pDestructor = ZVAL_PTR_DTOR;
1930 
1931 	if (source->nNumOfElements == 0) {
1932 		uint32_t mask = HASH_FLAG_MASK & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED);
1933 		HT_FLAGS(target) = (HT_FLAGS(source) & mask) | HASH_FLAG_STATIC_KEYS;
1934 		target->nTableMask = HT_MIN_MASK;
1935 		target->nNumUsed = 0;
1936 		target->nNumOfElements = 0;
1937 		target->nNextFreeElement = source->nNextFreeElement;
1938 		target->nInternalPointer = 0;
1939 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
1940 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
1941 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
1942 		target->nTableMask = source->nTableMask;
1943 		target->nNumUsed = source->nNumUsed;
1944 		target->nNumOfElements = source->nNumOfElements;
1945 		target->nNextFreeElement = source->nNextFreeElement;
1946 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1947 		target->nInternalPointer = source->nInternalPointer;
1948 		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
1949 	} else if (HT_FLAGS(source) & HASH_FLAG_PACKED) {
1950 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
1951 		target->nTableMask = HT_MIN_MASK;
1952 		target->nNumUsed = source->nNumUsed;
1953 		target->nNumOfElements = source->nNumOfElements;
1954 		target->nNextFreeElement = source->nNextFreeElement;
1955 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
1956 		target->nInternalPointer =
1957 			(source->nInternalPointer < source->nNumUsed) ?
1958 				source->nInternalPointer : 0;
1959 
1960 		HT_HASH_RESET_PACKED(target);
1961 
1962 		if (HT_IS_WITHOUT_HOLES(target)) {
1963 			zend_array_dup_packed_elements(source, target, 0);
1964 		} else {
1965 			zend_array_dup_packed_elements(source, target, 1);
1966 		}
1967 	} else {
1968 		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
1969 		target->nTableMask = source->nTableMask;
1970 		target->nNextFreeElement = source->nNextFreeElement;
1971 		target->nInternalPointer =
1972 			(source->nInternalPointer < source->nNumUsed) ?
1973 				source->nInternalPointer : 0;
1974 
1975 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1976 		HT_HASH_RESET(target);
1977 
1978 		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
1979 			if (HT_IS_WITHOUT_HOLES(source)) {
1980 				idx = zend_array_dup_elements(source, target, 1, 0);
1981 			} else {
1982 				idx = zend_array_dup_elements(source, target, 1, 1);
1983 			}
1984 		} else {
1985 			if (HT_IS_WITHOUT_HOLES(source)) {
1986 				idx = zend_array_dup_elements(source, target, 0, 0);
1987 			} else {
1988 				idx = zend_array_dup_elements(source, target, 0, 1);
1989 			}
1990 		}
1991 		target->nNumUsed = idx;
1992 		target->nNumOfElements = idx;
1993 	}
1994 	return target;
1995 }
1996 
1997 
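/* Merges source into target. With overwrite set, existing keys in target are
 * replaced (HASH_UPDATE); otherwise existing keys are left untouched
 * (HASH_ADD) and pCopyConstructor only runs when a value was actually
 * added. */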
1998 ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite)
1999 {
2000 	uint32_t idx;
2001 	Bucket *p;
2002 	zval *t, *s;
2003 
2004 	IS_CONSISTENT(source);
2005 	IS_CONSISTENT(target);
2006 	HT_ASSERT_RC1(target);
2007 
2008 	if (overwrite) {
2009 		for (idx = 0; idx < source->nNumUsed; idx++) {
2010 			p = source->arData + idx;
2011 			s = &p->val;
2012 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2013 				s = Z_INDIRECT_P(s);
2014 			}
2015 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2016 				continue;
2017 			}
2018 			if (p->key) {
2019 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
2020 				if (pCopyConstructor) {
2021 					pCopyConstructor(t);
2022 				}
2023 			} else {
2024 				t = zend_hash_index_update(target, p->h, s);
2025 				if (pCopyConstructor) {
2026 					pCopyConstructor(t);
2027 				}
2028 			}
2029 		}
2030 	} else {
2031 		for (idx = 0; idx < source->nNumUsed; idx++) {
2032 			p = source->arData + idx;
2033 			s = &p->val;
2034 			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
2035 				s = Z_INDIRECT_P(s);
2036 			}
2037 			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
2038 				continue;
2039 			}
2040 			if (p->key) {
2041 				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
2042 				if (t && pCopyConstructor) {
2043 					pCopyConstructor(t);
2044 				}
2045 			} else {
2046 				t = zend_hash_index_add(target, p->h, s);
2047 				if (t && pCopyConstructor) {
2048 					pCopyConstructor(t);
2049 				}
2050 			}
2051 		}
2052 	}
2053 }
2054 
2055 
2056 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
2057 {
2058 	zend_hash_key hash_key;
2059 
2060 	hash_key.h = p->h;
2061 	hash_key.key = p->key;
2062 	return merge_checker_func(target, source_data, &hash_key, pParam);
2063 }
2064 
2065 
2066 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
2067 {
2068 	uint32_t idx;
2069 	Bucket *p;
2070 	zval *t;
2071 
2072 	IS_CONSISTENT(source);
2073 	IS_CONSISTENT(target);
2074 	HT_ASSERT_RC1(target);
2075 
2076 	for (idx = 0; idx < source->nNumUsed; idx++) {
2077 		p = source->arData + idx;
2078 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2079 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
2080 			t = zend_hash_update(target, p->key, &p->val);
2081 			if (pCopyConstructor) {
2082 				pCopyConstructor(t);
2083 			}
2084 		}
2085 	}
2086 }
2087 
2088 
2089 /* Returns the hash table data if found and NULL if not. */
2090 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
2091 {
2092 	Bucket *p;
2093 
2094 	IS_CONSISTENT(ht);
2095 
2096 	p = zend_hash_find_bucket(ht, key, 0);
2097 	return p ? &p->val : NULL;
2098 }
2099 
2100 ZEND_API zval* ZEND_FASTCALL _zend_hash_find_known_hash(const HashTable *ht, zend_string *key)
2101 {
2102 	Bucket *p;
2103 
2104 	IS_CONSISTENT(ht);
2105 
2106 	p = zend_hash_find_bucket(ht, key, 1);
2107 	return p ? &p->val : NULL;
2108 }
2109 
2110 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
2111 {
2112 	zend_ulong h;
2113 	Bucket *p;
2114 
2115 	IS_CONSISTENT(ht);
2116 
2117 	h = zend_inline_hash_func(str, len);
2118 	p = zend_hash_str_find_bucket(ht, str, len, h);
2119 	return p ? &p->val : NULL;
2120 }
2121 
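/* Usage sketch (function and variable names are illustrative): typical
 * lookups by zend_string key and by C string literal. */
#if 0
static void example_lookups(HashTable *ht, zend_string *key)
{
	zval *a = zend_hash_find(ht, key);
	zval *b = zend_hash_str_find(ht, "name", sizeof("name") - 1);

	(void)a; (void)b; /* each is NULL when the key is not present */
}
#endif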
2122 ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
2123 {
2124 	Bucket *p;
2125 
2126 	IS_CONSISTENT(ht);
2127 
2128 	p = zend_hash_find_bucket(ht, key, 0);
2129 	return p ? 1 : 0;
2130 }
2131 
2132 ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
2133 {
2134 	zend_ulong h;
2135 	Bucket *p;
2136 
2137 	IS_CONSISTENT(ht);
2138 
2139 	h = zend_inline_hash_func(str, len);
2140 	p = zend_hash_str_find_bucket(ht, str, len, h);
2141 	return p ? 1 : 0;
2142 }
2143 
2144 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2145 {
2146 	Bucket *p;
2147 
2148 	IS_CONSISTENT(ht);
2149 
2150 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2151 		if (h < ht->nNumUsed) {
2152 			p = ht->arData + h;
2153 			if (Z_TYPE(p->val) != IS_UNDEF) {
2154 				return &p->val;
2155 			}
2156 		}
2157 		return NULL;
2158 	}
2159 
2160 	p = zend_hash_index_find_bucket(ht, h);
2161 	return p ? &p->val : NULL;
2162 }
2163 
2164 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2165 {
2166 	Bucket *p;
2167 
2168 	IS_CONSISTENT(ht);
2169 
2170 	p = zend_hash_index_find_bucket(ht, h);
2171 	return p ? &p->val : NULL;
2172 }
2173 
2174 ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
2175 {
2176 	Bucket *p;
2177 
2178 	IS_CONSISTENT(ht);
2179 
2180 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2181 		if (h < ht->nNumUsed) {
2182 			if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
2183 				return 1;
2184 			}
2185 		}
2186 		return 0;
2187 	}
2188 
2189 	p = zend_hash_index_find_bucket(ht, h);
2190 	return p ? 1 : 0;
2191 }
2192 
2193 
2194 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2195 {
2196 	IS_CONSISTENT(ht);
2197 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2198 	*pos = _zend_hash_get_valid_pos(ht, 0);
2199 }
2200 
2201 
2202 /* This function could be considerably optimized by remembering
2203  * the end of the list; currently it scans backwards from nNumUsed.
2204  */
2205 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2206 {
2207 	uint32_t idx;
2208 
2209 	IS_CONSISTENT(ht);
2210 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2211 
2212 	idx = ht->nNumUsed;
2213 	while (idx > 0) {
2214 		idx--;
2215 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2216 			*pos = idx;
2217 			return;
2218 		}
2219 	}
2220 	*pos = ht->nNumUsed;
2221 }
2222 
2223 
2224 ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2225 {
2226 	uint32_t idx;
2227 
2228 	IS_CONSISTENT(ht);
2229 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2230 
2231 	idx = _zend_hash_get_valid_pos(ht, *pos);
2232 	if (idx < ht->nNumUsed) {
2233 		while (1) {
2234 			idx++;
2235 			if (idx >= ht->nNumUsed) {
2236 				*pos = ht->nNumUsed;
2237 				return SUCCESS;
2238 			}
2239 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2240 				*pos = idx;
2241 				return SUCCESS;
2242 			}
2243 		}
2244 	} else {
2245 		return FAILURE;
2246 	}
2247 }
2248 
2249 ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2250 {
2251 	uint32_t idx = *pos;
2252 
2253 	IS_CONSISTENT(ht);
2254 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2255 
2256 	if (idx < ht->nNumUsed) {
2257 		while (idx > 0) {
2258 			idx--;
2259 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2260 				*pos = idx;
2261 				return SUCCESS;
2262 			}
2263 		}
2264 		*pos = ht->nNumUsed;
2265 		return SUCCESS;
2266 	} else {
2267 		return FAILURE;
2268 	}
2269 }
2270 
2271 
2272 /* This function should be made binary safe */
2273 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2274 {
2275 	uint32_t idx;
2276 	Bucket *p;
2277 
2278 	IS_CONSISTENT(ht);
2279 	idx = _zend_hash_get_valid_pos(ht, *pos);
2280 	if (idx < ht->nNumUsed) {
2281 		p = ht->arData + idx;
2282 		if (p->key) {
2283 			*str_index = p->key;
2284 			return HASH_KEY_IS_STRING;
2285 		} else {
2286 			*num_index = p->h;
2287 			return HASH_KEY_IS_LONG;
2288 		}
2289 	}
2290 	return HASH_KEY_NON_EXISTENT;
2291 }
2292 
2293 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2294 {
2295 	uint32_t idx;
2296 	Bucket *p;
2297 
2298 	IS_CONSISTENT(ht);
2299 	idx = _zend_hash_get_valid_pos(ht, *pos);
2300 	if (idx >= ht->nNumUsed) {
2301 		ZVAL_NULL(key);
2302 	} else {
2303 		p = ht->arData + idx;
2304 		if (p->key) {
2305 			ZVAL_STR_COPY(key, p->key);
2306 		} else {
2307 			ZVAL_LONG(key, p->h);
2308 		}
2309 	}
2310 }
2311 
2312 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2313 {
2314 	uint32_t idx;
2315 	Bucket *p;
2316 
2317 	IS_CONSISTENT(ht);
2318 	idx = _zend_hash_get_valid_pos(ht, *pos);
2319 	if (idx < ht->nNumUsed) {
2320 		p = ht->arData + idx;
2321 		if (p->key) {
2322 			return HASH_KEY_IS_STRING;
2323 		} else {
2324 			return HASH_KEY_IS_LONG;
2325 		}
2326 	}
2327 	return HASH_KEY_NON_EXISTENT;
2328 }
2329 
2330 
2331 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2332 {
2333 	uint32_t idx;
2334 	Bucket *p;
2335 
2336 	IS_CONSISTENT(ht);
2337 	idx = _zend_hash_get_valid_pos(ht, *pos);
2338 	if (idx < ht->nNumUsed) {
2339 		p = ht->arData + idx;
2340 		return &p->val;
2341 	} else {
2342 		return NULL;
2343 	}
2344 }
2345 
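/* Usage sketch (names are illustrative): iterating a table with an external
 * HashPosition instead of the table's internal pointer. */
#if 0
static void example_walk(HashTable *ht)
{
	HashPosition pos;
	zval *zv;

	zend_hash_internal_pointer_reset_ex(ht, &pos);
	while ((zv = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
		/* ... use *zv ... */
		zend_hash_move_forward_ex(ht, &pos);
	}
}
#endif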
2346 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2347 {
2348 	zval val;
2349 	zend_ulong h;
2350 	zend_string *key;
2351 
2352 	ZVAL_COPY_VALUE(&val, &p->val);
2353 	h = p->h;
2354 	key = p->key;
2355 
2356 	ZVAL_COPY_VALUE(&p->val, &q->val);
2357 	p->h = q->h;
2358 	p->key = q->key;
2359 
2360 	ZVAL_COPY_VALUE(&q->val, &val);
2361 	q->h = h;
2362 	q->key = key;
2363 }
2364 
2365 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2366 {
2367 	zval val;
2368 
2369 	ZVAL_COPY_VALUE(&val, &p->val);
2370 	ZVAL_COPY_VALUE(&p->val, &q->val);
2371 	ZVAL_COPY_VALUE(&q->val, &val);
2372 }
2373 
2374 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2375 {
2376 	zval val;
2377 	zend_ulong h;
2378 
2379 	ZVAL_COPY_VALUE(&val, &p->val);
2380 	h = p->h;
2381 
2382 	ZVAL_COPY_VALUE(&p->val, &q->val);
2383 	p->h = q->h;
2384 
2385 	ZVAL_COPY_VALUE(&q->val, &val);
2386 	q->h = h;
2387 }
2388 
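/* Sorts the buckets in place using the supplied sort()/compar() pair. Holes
 * are compacted first. With renumber set, string keys are released and the
 * elements are renumbered 0..n-1, keeping or making the table packed;
 * without it, a packed table is converted to a hash table and the hash part
 * is rebuilt to match the new bucket order. */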
2389 ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
2390 {
2391 	Bucket *p;
2392 	uint32_t i, j;
2393 
2394 	IS_CONSISTENT(ht);
2395 	HT_ASSERT_RC1(ht);
2396 
2397 	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
2398 		return SUCCESS;
2399 	}
2400 
2401 	if (HT_IS_WITHOUT_HOLES(ht)) {
2402 		i = ht->nNumUsed;
2403 	} else {
2404 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2405 			p = ht->arData + j;
2406 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2407 			if (i != j) {
2408 				ht->arData[i] = *p;
2409 			}
2410 			i++;
2411 		}
2412 	}
2413 
2414 	sort((void *)ht->arData, i, sizeof(Bucket), compar,
2415 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2416 				((HT_FLAGS(ht) & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2417 
2418 	ht->nNumUsed = i;
2419 	ht->nInternalPointer = 0;
2420 
2421 	if (renumber) {
2422 		for (j = 0; j < i; j++) {
2423 			p = ht->arData + j;
2424 			p->h = j;
2425 			if (p->key) {
2426 				zend_string_release(p->key);
2427 				p->key = NULL;
2428 			}
2429 		}
2430 
2431 		ht->nNextFreeElement = i;
2432 	}
2433 	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
2434 		if (!renumber) {
2435 			zend_hash_packed_to_hash(ht);
2436 		}
2437 	} else {
2438 		if (renumber) {
2439 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2440 			Bucket *old_buckets = ht->arData;
2441 
2442 			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
2443 			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2444 			ht->nTableMask = HT_MIN_MASK;
2445 			HT_SET_DATA_ADDR(ht, new_data);
2446 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2447 			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
2448 			HT_HASH_RESET_PACKED(ht);
2449 		} else {
2450 			zend_hash_rehash(ht);
2451 		}
2452 	}
2453 
2454 	return SUCCESS;
2455 }
2456 
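/* Compares two hash tables, first by number of elements, then element by
 * element. With ordered set, keys must match pairwise in iteration order;
 * otherwise each key of ht1 is looked up in ht2. Returns 0 when the tables
 * compare equal, and a negative or positive value to order them otherwise. */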
2457 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
2458 	uint32_t idx1, idx2;
2459 
2460 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2461 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2462 	}
2463 
2464 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2465 		Bucket *p1 = ht1->arData + idx1, *p2;
2466 		zval *pData1, *pData2;
2467 		int result;
2468 
2469 		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2470 		if (ordered) {
2471 			while (1) {
2472 				ZEND_ASSERT(idx2 != ht2->nNumUsed);
2473 				p2 = ht2->arData + idx2;
2474 				if (Z_TYPE(p2->val) != IS_UNDEF) break;
2475 				idx2++;
2476 			}
2477 			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
2478 				if (p1->h != p2->h) {
2479 					return p1->h > p2->h ? 1 : -1;
2480 				}
2481 			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
2482 				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2483 					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2484 				}
2485 
2486 				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2487 				if (result != 0) {
2488 					return result;
2489 				}
2490 			} else {
2491 				/* Mixed key types: a string key is considered larger */
2492 				return p1->key != NULL ? 1 : -1;
2493 			}
2494 			pData2 = &p2->val;
2495 			idx2++;
2496 		} else {
2497 			if (p1->key == NULL) { /* numeric index */
2498 				pData2 = zend_hash_index_find(ht2, p1->h);
2499 				if (pData2 == NULL) {
2500 					return 1;
2501 				}
2502 			} else { /* string index */
2503 				pData2 = zend_hash_find(ht2, p1->key);
2504 				if (pData2 == NULL) {
2505 					return 1;
2506 				}
2507 			}
2508 		}
2509 
2510 		pData1 = &p1->val;
2511 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2512 			pData1 = Z_INDIRECT_P(pData1);
2513 		}
2514 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2515 			pData2 = Z_INDIRECT_P(pData2);
2516 		}
2517 
2518 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
2519 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
2520 				return -1;
2521 			}
2522 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2523 			return 1;
2524 		} else {
2525 			result = compar(pData1, pData2);
2526 			if (result != 0) {
2527 				return result;
2528 			}
2529 		}
2530 	}
2531 
2532 	return 0;
2533 }
2534 
2535 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
2536 {
2537 	int result;
2538 	IS_CONSISTENT(ht1);
2539 	IS_CONSISTENT(ht2);
2540 
2541 	if (ht1 == ht2) {
2542 		return 0;
2543 	}
2544 
2545 	/* It's enough to protect only one of the arrays.
2546 	 * The second one may be referenced from the first and this may cause
2547 	 * false recursion detection.
2548 	 */
2549 	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
2550 		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
2551 	}
2552 
2553 	if (!(GC_FLAGS(ht1) & GC_IMMUTABLE)) {
2554 		GC_PROTECT_RECURSION(ht1);
2555 	}
2556 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2557 	if (!(GC_FLAGS(ht1) & GC_IMMUTABLE)) {
2558 		GC_UNPROTECT_RECURSION(ht1);
2559 	}
2560 
2561 	return result;
2562 }
2563 
2564 
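/* Returns the minimum (flag == 0) or maximum (flag != 0) value in the table
 * according to compar(), or NULL if the table is empty. */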
2565 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
2566 {
2567 	uint32_t idx;
2568 	Bucket *p, *res;
2569 
2570 	IS_CONSISTENT(ht);
2571 
2572 	if (ht->nNumOfElements == 0) {
2573 		return NULL;
2574 	}
2575 
2576 	idx = 0;
2577 	while (1) {
2578 		if (idx == ht->nNumUsed) {
2579 			return NULL;
2580 		}
2581 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2582 		idx++;
2583 	}
2584 	res = ht->arData + idx;
2585 	for (; idx < ht->nNumUsed; idx++) {
2586 		p = ht->arData + idx;
2587 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2588 
2589 		if (flag) {
2590 			if (compar(res, p) < 0) { /* max */
2591 				res = p;
2592 			}
2593 		} else {
2594 			if (compar(res, p) > 0) { /* min */
2595 				res = p;
2596 			}
2597 		}
2598 	}
2599 	return &res->val;
2600 }
2601 
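/* Checks whether the string is a canonical decimal integer (optional leading
 * '-', no leading zeros, no zend_long overflow). On success stores the value
 * in *idx and returns 1, otherwise returns 0. */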
2602 ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2603 {
2604 	register const char *tmp = key;
2605 
2606 	const char *end = key + length;
2607 
2608 	if (*tmp == '-') {
2609 		tmp++;
2610 	}
2611 
2612 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
2613 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
2614 	 || (SIZEOF_ZEND_LONG == 4 &&
2615 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2616 	     *tmp > '2')) { /* overflow */
2617 		return 0;
2618 	}
2619 	*idx = (*tmp - '0');
2620 	while (1) {
2621 		++tmp;
2622 		if (tmp == end) {
2623 			if (*key == '-') {
2624 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
2625 					return 0;
2626 				}
2627 				*idx = 0 - *idx;
2628 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
2629 				return 0;
2630 			}
2631 			return 1;
2632 		}
2633 		if (*tmp <= '9' && *tmp >= '0') {
2634 			*idx = (*idx * 10) + (*tmp - '0');
2635 		} else {
2636 			return 0;
2637 		}
2638 	}
2639 }
2640 
2641 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
2642  * and converts it to a "proptable" (contains only string keys).
2643  * If the symtable didn't need duplicating, its refcount is incremented.
2644  */
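/* For example, an entry stored under the integer key 42 ends up in the
 * resulting proptable under the string key "42" (via zend_long_to_str()). */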
2645 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
2646 {
2647 	zend_ulong num_key;
2648 	zend_string *str_key;
2649 	zval *zv;
2650 
2651 	if (UNEXPECTED(HT_IS_PACKED(ht))) {
2652 		goto convert;
2653 	}
2654 
2655 	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
2656 		if (!str_key) {
2657 			goto convert;
2658 		}
2659 	} ZEND_HASH_FOREACH_END();
2660 
2661 	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
2662 		GC_ADDREF(ht);
2663 	}
2664 
2665 	return ht;
2666 
2667 convert:
2668 	{
2669 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
2670 
2671 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2672 			if (!str_key) {
2673 				str_key = zend_long_to_str(num_key);
2674 				zend_string_delref(str_key);
2675 			}
2676 			do {
2677 				if (Z_OPT_REFCOUNTED_P(zv)) {
2678 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2679 						zv = Z_REFVAL_P(zv);
2680 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2681 							break;
2682 						}
2683 					}
2684 					Z_ADDREF_P(zv);
2685 				}
2686 			} while (0);
2687 			zend_hash_update(new_ht, str_key, zv);
2688 		} ZEND_HASH_FOREACH_END();
2689 
2690 		return new_ht;
2691 	}
2692 }
2693 
2694 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
2695  * a "symtable" (contains integer and non-numeric string keys).
2696  * If the proptable didn't need duplicating, its refcount is incremented.
2697  */
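/* Conversely, an entry stored under the numeric string key "42" is
 * re-inserted into the resulting symtable under the integer key 42
 * (via ZEND_HANDLE_NUMERIC()). */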
2698 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, zend_bool always_duplicate)
2699 {
2700 	zend_ulong num_key;
2701 	zend_string *str_key;
2702 	zval *zv;
2703 
2704 	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
2705 		/* The `str_key &&` here might seem redundant: property tables should
2706 		 * only have string keys. Unfortunately, this isn't true, at the very
2707 		 * least because of ArrayObject, which stores a symtable where the
2708 		 * property table should be.
2709 		 */
2710 		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2711 			goto convert;
2712 		}
2713 	} ZEND_HASH_FOREACH_END();
2714 
2715 	if (always_duplicate) {
2716 		return zend_array_dup(ht);
2717 	}
2718 
2719 	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
2720 		GC_ADDREF(ht);
2721 	}
2722 
2723 	return ht;
2724 
2725 convert:
2726 	{
2727 		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));
2728 
2729 		ZEND_HASH_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
2730 			do {
2731 				if (Z_OPT_REFCOUNTED_P(zv)) {
2732 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2733 						zv = Z_REFVAL_P(zv);
2734 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2735 							break;
2736 						}
2737 					}
2738 					Z_ADDREF_P(zv);
2739 				}
2740 			} while (0);
2741 			/* Again, thank ArrayObject for `!str_key ||`. */
2742 			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2743 				zend_hash_index_update(new_ht, num_key, zv);
2744 			} else {
2745 				zend_hash_update(new_ht, str_key, zv);
2746 			}
2747 		} ZEND_HASH_FOREACH_END();
2748 
2749 		return new_ht;
2750 	}
2751 }
2752 
2753 /*
2754  * Local variables:
2755  * tab-width: 4
2756  * c-basic-offset: 4
2757  * indent-tabs-mode: t
2758  * End:
2759  * vim600: sw=4 ts=4 fdm=marker
2760  * vim<600: sw=4 ts=4
2761  */
2762