xref: /PHP-7.1/Zend/zend_hash.c (revision b711a96a)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) 1998-2018 Zend Technologies Ltd. (http://www.zend.com) |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@zend.com>                                |
16    |          Zeev Suraski <zeev@zend.com>                                |
17    |          Dmitry Stogov <dmitry@zend.com>                             |
18    +----------------------------------------------------------------------+
19 */
20 
21 /* $Id$ */
22 
23 #include "zend.h"
24 #include "zend_globals.h"
25 #include "zend_variables.h"
26 
27 #define HT_DEBUG 0
28 #if HT_DEBUG
29 # define HT_ASSERT(c) ZEND_ASSERT(c)
30 #else
31 # define HT_ASSERT(c)
32 #endif
33 
34 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
35 
36 #if ZEND_DEBUG
37 
38 #define HT_OK					0x00
39 #define HT_IS_DESTROYING		0x40
40 #define HT_DESTROYED			0x80
41 #define HT_CLEANING				0xc0
42 
43 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
44 {
45 	if (ht->u.v.consistency == HT_OK) {
46 		return;
47 	}
48 	switch (ht->u.v.consistency) {
49 		case HT_IS_DESTROYING:
50 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
51 			break;
52 		case HT_DESTROYED:
53 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
54 			break;
55 		case HT_CLEANING:
56 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
57 			break;
58 		default:
59 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
60 			break;
61 	}
62 	zend_bailout();
63 }
64 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
65 #define SET_INCONSISTENT(n) do { \
66 		(ht)->u.v.consistency = n; \
67 	} while (0)
68 #else
69 #define IS_CONSISTENT(a)
70 #define SET_INCONSISTENT(n)
71 #endif
72 
73 #define HASH_PROTECT_RECURSION(ht)														\
74 	if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) {									\
75 		if (((ht)->u.flags & ZEND_HASH_APPLY_COUNT_MASK) >= (3 << 8)) {												\
76 			zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");\
77 		}																				\
78 		ZEND_HASH_INC_APPLY_COUNT(ht);													\
79 	}
80 
81 #define HASH_UNPROTECT_RECURSION(ht)													\
82 	if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) {									\
83 		ZEND_HASH_DEC_APPLY_COUNT(ht);													\
84 	}
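/* Note: judging from the (3 << 8) comparison above, the apply counter is packed
 * into ht->u.flags above bit 8, so at most three nested zend_hash_apply*() calls
 * on the same protected table are tolerated before the fatal error fires. */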
85 
86 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
87 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
88 		zend_hash_do_resize(ht);					\
89 	}
90 
91 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
92 
93 static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
94 {
95 #if defined(ZEND_WIN32)
96 	unsigned long index;
97 #endif
98 
99 	/* Use a big enough power of 2 */
100 	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
101 	if (nSize < HT_MIN_SIZE) {
102 		nSize = HT_MIN_SIZE;
103 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
104 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
105 	}
106 
107 #if defined(ZEND_WIN32)
108 	if (BitScanReverse(&index, nSize - 1)) {
109 		return 0x2 << ((31 - index) ^ 0x1f);
110 	} else {
111 		/* nSize is ensured to be in the valid range, fall back to it
112 		   rather than using an undefined bit scan result. */
113 		return nSize;
114 	}
115 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
116 	return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
117 #else
118 	nSize -= 1;
119 	nSize |= (nSize >> 1);
120 	nSize |= (nSize >> 2);
121 	nSize |= (nSize >> 4);
122 	nSize |= (nSize >> 8);
123 	nSize |= (nSize >> 16);
124 	return nSize + 1;
125 #endif
126 }
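/* Illustrative example (assuming HT_MIN_SIZE == 8): zend_hash_check_size(10)
 * returns 16 and zend_hash_check_size(64) returns 64; requests below the
 * minimum are rounded up to 8. The result is always a power of two so that
 * (h | nTableMask) can replace a modulo when picking a hash slot. */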
127 
128 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
129 {
130 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
131 	ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
132 	if (packed) {
133 		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
134 		(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
135 		HT_HASH_RESET_PACKED(ht);
136 	} else {
137 		(ht)->nTableMask = -(ht)->nTableSize;
138 		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
139 		(ht)->u.flags |= HASH_FLAG_INITIALIZED;
140 		if (EXPECTED(ht->nTableMask == (uint32_t)-8)) {
141 			Bucket *arData = ht->arData;
142 
143 			HT_HASH_EX(arData, -8) = -1;
144 			HT_HASH_EX(arData, -7) = -1;
145 			HT_HASH_EX(arData, -6) = -1;
146 			HT_HASH_EX(arData, -5) = -1;
147 			HT_HASH_EX(arData, -4) = -1;
148 			HT_HASH_EX(arData, -3) = -1;
149 			HT_HASH_EX(arData, -2) = -1;
150 			HT_HASH_EX(arData, -1) = -1;
151 		} else {
152 			HT_HASH_RESET(ht);
153 		}
154 	}
155 }
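/* Layout note (sketch): for a non-packed table the hash slots live at negative
 * offsets in front of ht->arData, which is why nTableMask is the negated table
 * size and HT_HASH_EX() is indexed with values in [-nTableSize, -1]. The
 * (uint32_t)-8 branch above is an unrolled HT_HASH_RESET() for the smallest
 * table size. */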
156 
157 static zend_always_inline void zend_hash_check_init(HashTable *ht, int packed)
158 {
159 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
160 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
161 		zend_hash_real_init_ex(ht, packed);
162 	}
163 }
164 
165 #define CHECK_INIT(ht, packed) \
166 	zend_hash_check_init(ht, packed)
167 
168 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
169 	{HT_INVALID_IDX, HT_INVALID_IDX};
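/* Note: freshly initialized tables point their data at this shared read-only
 * block (see _zend_hash_init() below), so lookups on an empty table hit
 * HT_INVALID_IDX without any allocation; real storage is allocated lazily by
 * CHECK_INIT() on the first insertion. */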
170 
171 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
172 {
173 	GC_REFCOUNT(ht) = 1;
174 	GC_TYPE_INFO(ht) = IS_ARRAY;
175 	ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
176 	ht->nTableMask = HT_MIN_MASK;
177 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
178 	ht->nNumUsed = 0;
179 	ht->nNumOfElements = 0;
180 	ht->nInternalPointer = HT_INVALID_IDX;
181 	ht->nNextFreeElement = 0;
182 	ht->pDestructor = pDestructor;
183 	ht->nTableSize = zend_hash_check_size(nSize);
184 }
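/* Usage sketch (assumes the zend_hash_init() convenience macro from zend_hash.h,
 * which forwards here and ignores its legacy pHashFunction argument):
 *
 *   HashTable ht;
 *   zend_hash_init(&ht, 32, NULL, ZVAL_PTR_DTOR, 0);  // request-local table
 *
 * No buckets are allocated yet; see the comment above uninitialized_bucket. */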
185 
186 static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
187 {
188 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
189 	if (ht->nTableSize >= HT_MAX_SIZE) {
190 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
191 	}
192 	ht->nTableSize += ht->nTableSize;
193 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
194 }
195 
196 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
197 {
198 	IS_CONSISTENT(ht);
199 
200 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
201 	zend_hash_real_init_ex(ht, packed);
202 }
203 
204 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
205 {
206 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
207 	Bucket *old_buckets = ht->arData;
208 
209 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
210 	ht->u.flags &= ~HASH_FLAG_PACKED;
211 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
212 	ht->nTableMask = -ht->nTableSize;
213 	HT_SET_DATA_ADDR(ht, new_data);
214 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
215 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
216 	zend_hash_rehash(ht);
217 }
218 
219 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
220 {
221 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
222 	Bucket *old_buckets = ht->arData;
223 
224 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
225 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
226 	ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
227 	ht->nTableMask = HT_MIN_MASK;
228 	HT_SET_DATA_ADDR(ht, new_data);
229 	HT_HASH_RESET_PACKED(ht);
230 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
231 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
232 }
233 
234 ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
235 {
236 	_zend_hash_init(ht, nSize, pDestructor, persistent ZEND_FILE_LINE_RELAY_CC);
237 	if (!bApplyProtection) {
238 		ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
239 	}
240 }
241 
242 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
243 {
244 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
245 	if (nSize == 0) return;
246 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
247 		if (nSize > ht->nTableSize) {
248 			ht->nTableSize = zend_hash_check_size(nSize);
249 		}
250 		zend_hash_check_init(ht, packed);
251 	} else {
252 		if (packed) {
253 			ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
254 			if (nSize > ht->nTableSize) {
255 				ht->nTableSize = zend_hash_check_size(nSize);
256 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
257 			}
258 		} else {
259 			ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
260 			if (nSize > ht->nTableSize) {
261 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
262 				Bucket *old_buckets = ht->arData;
263 				nSize = zend_hash_check_size(nSize);
264 				new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
265 				ht->nTableSize = nSize;
266 				ht->nTableMask = -ht->nTableSize;
267 				HT_SET_DATA_ADDR(ht, new_data);
268 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
269 				pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
270 				zend_hash_rehash(ht);
271 			}
272 		}
273 	}
274 }
275 
276 static uint32_t zend_array_recalc_elements(HashTable *ht)
277 {
278        zval *val;
279        uint32_t num = ht->nNumOfElements;
280 
281 	   ZEND_HASH_FOREACH_VAL(ht, val) {
282 		   if (Z_TYPE_P(val) == IS_INDIRECT) {
283 			   if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
284 				   num--;
285 			   }
286 		   }
287        } ZEND_HASH_FOREACH_END();
288        return num;
289 }
290 /* }}} */
291 
292 ZEND_API uint32_t zend_array_count(HashTable *ht)
293 {
294 	uint32_t num;
295 	if (UNEXPECTED(ht->u.v.flags & HASH_FLAG_HAS_EMPTY_IND)) {
296 		num = zend_array_recalc_elements(ht);
297 		if (UNEXPECTED(ht->nNumOfElements == num)) {
298 			ht->u.v.flags &= ~HASH_FLAG_HAS_EMPTY_IND;
299 		}
300 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
301 		num = zend_array_recalc_elements(ht);
302 	} else {
303 		num = zend_hash_num_elements(ht);
304 	}
305 	return num;
306 }
307 /* }}} */
308 
309 ZEND_API void ZEND_FASTCALL zend_hash_set_apply_protection(HashTable *ht, zend_bool bApplyProtection)
310 {
311 	if (bApplyProtection) {
312 		ht->u.flags |= HASH_FLAG_APPLY_PROTECTION;
313 	} else {
314 		ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
315 	}
316 }
317 
318 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
319 {
320 	HashTableIterator *iter = EG(ht_iterators);
321 	HashTableIterator *end  = iter + EG(ht_iterators_count);
322 	uint32_t idx;
323 
324 	if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
325 		ht->u.v.nIteratorsCount++;
326 	}
327 	while (iter != end) {
328 		if (iter->ht == NULL) {
329 			iter->ht = ht;
330 			iter->pos = pos;
331 			idx = iter - EG(ht_iterators);
332 			if (idx + 1 > EG(ht_iterators_used)) {
333 				EG(ht_iterators_used) = idx + 1;
334 			}
335 			return idx;
336 		}
337 		iter++;
338 	}
339 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
340 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
341 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
342 	} else {
343 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
344 	}
345 	iter = EG(ht_iterators) + EG(ht_iterators_count);
346 	EG(ht_iterators_count) += 8;
347 	iter->ht = ht;
348 	iter->pos = pos;
349 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
350 	idx = iter - EG(ht_iterators);
351 	EG(ht_iterators_used) = idx + 1;
352 	return idx;
353 }
354 
355 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
356 {
357 	HashTableIterator *iter = EG(ht_iterators) + idx;
358 
359 	ZEND_ASSERT(idx != (uint32_t)-1);
360 	if (iter->pos == HT_INVALID_IDX) {
361 		return HT_INVALID_IDX;
362 	} else if (UNEXPECTED(iter->ht != ht)) {
363 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
364 				&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
365 			iter->ht->u.v.nIteratorsCount--;
366 		}
367 		if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
368 			ht->u.v.nIteratorsCount++;
369 		}
370 		iter->ht = ht;
371 		iter->pos = ht->nInternalPointer;
372 	}
373 	return iter->pos;
374 }
375 
376 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
377 {
378 	HashTable *ht = Z_ARRVAL_P(array);
379 	HashTableIterator *iter = EG(ht_iterators) + idx;
380 
381 	ZEND_ASSERT(idx != (uint32_t)-1);
382 	if (iter->pos == HT_INVALID_IDX) {
383 		return HT_INVALID_IDX;
384 	} else if (UNEXPECTED(iter->ht != ht)) {
385 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
386 				&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
387 			iter->ht->u.v.nIteratorsCount--;
388 		}
389 		SEPARATE_ARRAY(array);
390 		ht = Z_ARRVAL_P(array);
391 		if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
392 			ht->u.v.nIteratorsCount++;
393 		}
394 		iter->ht = ht;
395 		iter->pos = ht->nInternalPointer;
396 	}
397 	return iter->pos;
398 }
399 
400 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
401 {
402 	HashTableIterator *iter = EG(ht_iterators) + idx;
403 
404 	ZEND_ASSERT(idx != (uint32_t)-1);
405 
406 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
407 			&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
408 		iter->ht->u.v.nIteratorsCount--;
409 	}
410 	iter->ht = NULL;
411 
412 	if (idx == EG(ht_iterators_used) - 1) {
413 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
414 			idx--;
415 		}
416 		EG(ht_iterators_used) = idx;
417 	}
418 }
419 
420 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
421 {
422 	HashTableIterator *iter = EG(ht_iterators);
423 	HashTableIterator *end  = iter + EG(ht_iterators_used);
424 
425 	while (iter != end) {
426 		if (iter->ht == ht) {
427 			iter->ht = HT_POISONED_PTR;
428 		}
429 		iter++;
430 	}
431 }
432 
433 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
434 {
435 	if (UNEXPECTED(ht->u.v.nIteratorsCount)) {
436 		_zend_hash_iterators_remove(ht);
437 	}
438 }
439 
440 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
441 {
442 	HashTableIterator *iter = EG(ht_iterators);
443 	HashTableIterator *end  = iter + EG(ht_iterators_used);
444 	HashPosition res = HT_INVALID_IDX;
445 
446 	while (iter != end) {
447 		if (iter->ht == ht) {
448 			if (iter->pos >= start && iter->pos < res) {
449 				res = iter->pos;
450 			}
451 		}
452 		iter++;
453 	}
454 	return res;
455 }
456 
457 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
458 {
459 	HashTableIterator *iter = EG(ht_iterators);
460 	HashTableIterator *end  = iter + EG(ht_iterators_used);
461 
462 	while (iter != end) {
463 		if (iter->ht == ht && iter->pos == from) {
464 			iter->pos = to;
465 		}
466 		iter++;
467 	}
468 }
469 
470 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key)
471 {
472 	zend_ulong h;
473 	uint32_t nIndex;
474 	uint32_t idx;
475 	Bucket *p, *arData;
476 
477 	h = zend_string_hash_val(key);
478 	arData = ht->arData;
479 	nIndex = h | ht->nTableMask;
480 	idx = HT_HASH_EX(arData, nIndex);
481 	while (EXPECTED(idx != HT_INVALID_IDX)) {
482 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
483 		if (EXPECTED(p->key == key)) { /* check for the same interned string */
484 			return p;
485 		} else if (EXPECTED(p->h == h) &&
486 		     EXPECTED(p->key) &&
487 		     EXPECTED(ZSTR_LEN(p->key) == ZSTR_LEN(key)) &&
488 		     EXPECTED(memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
489 			return p;
490 		}
491 		idx = Z_NEXT(p->val);
492 	}
493 	return NULL;
494 }
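/* Lookup sketch: (h | nTableMask) selects one of the negative hash slots; that
 * slot holds the index of the first bucket in the collision chain, and each
 * bucket links to the next via Z_NEXT(p->val). Interned keys are compared by
 * pointer first, then by hash + length + memcmp(). */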
495 
496 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
497 {
498 	uint32_t nIndex;
499 	uint32_t idx;
500 	Bucket *p, *arData;
501 
502 	arData = ht->arData;
503 	nIndex = h | ht->nTableMask;
504 	idx = HT_HASH_EX(arData, nIndex);
505 	while (idx != HT_INVALID_IDX) {
506 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
507 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
508 		if ((p->h == h)
509 			 && p->key
510 			 && (ZSTR_LEN(p->key) == len)
511 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
512 			return p;
513 		}
514 		idx = Z_NEXT(p->val);
515 	}
516 	return NULL;
517 }
518 
519 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
520 {
521 	uint32_t nIndex;
522 	uint32_t idx;
523 	Bucket *p, *arData;
524 
525 	arData = ht->arData;
526 	nIndex = h | ht->nTableMask;
527 	idx = HT_HASH_EX(arData, nIndex);
528 	while (idx != HT_INVALID_IDX) {
529 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
530 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
531 		if (p->h == h && !p->key) {
532 			return p;
533 		}
534 		idx = Z_NEXT(p->val);
535 	}
536 	return NULL;
537 }
538 
539 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
540 {
541 	zend_ulong h;
542 	uint32_t nIndex;
543 	uint32_t idx;
544 	Bucket *p;
545 
546 	IS_CONSISTENT(ht);
547 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
548 
549 	if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
550 		CHECK_INIT(ht, 0);
551 		goto add_to_hash;
552 	} else if (ht->u.flags & HASH_FLAG_PACKED) {
553 		zend_hash_packed_to_hash(ht);
554 	} else if ((flag & HASH_ADD_NEW) == 0) {
555 		p = zend_hash_find_bucket(ht, key);
556 
557 		if (p) {
558 			zval *data;
559 
560 			if (flag & HASH_ADD) {
561 				if (!(flag & HASH_UPDATE_INDIRECT)) {
562 					return NULL;
563 				}
564 				ZEND_ASSERT(&p->val != pData);
565 				data = &p->val;
566 				if (Z_TYPE_P(data) == IS_INDIRECT) {
567 					data = Z_INDIRECT_P(data);
568 					if (Z_TYPE_P(data) != IS_UNDEF) {
569 						return NULL;
570 					}
571 				} else {
572 					return NULL;
573 				}
574 			} else {
575 				ZEND_ASSERT(&p->val != pData);
576 				data = &p->val;
577 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
578 					data = Z_INDIRECT_P(data);
579 				}
580 			}
581 			if (ht->pDestructor) {
582 				ht->pDestructor(data);
583 			}
584 			ZVAL_COPY_VALUE(data, pData);
585 			return data;
586 		}
587 	}
588 
589 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
590 
591 add_to_hash:
592 	idx = ht->nNumUsed++;
593 	ht->nNumOfElements++;
594 	if (ht->nInternalPointer == HT_INVALID_IDX) {
595 		ht->nInternalPointer = idx;
596 	}
597 	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
598 	p = ht->arData + idx;
599 	p->key = key;
600 	if (!ZSTR_IS_INTERNED(key)) {
601 		zend_string_addref(key);
602 		ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
603 		zend_string_hash_val(key);
604 	}
605 	p->h = h = ZSTR_H(key);
606 	ZVAL_COPY_VALUE(&p->val, pData);
607 	nIndex = h | ht->nTableMask;
608 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
609 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
610 
611 	return &p->val;
612 }
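/* Summary of the flag handling above: HASH_ADD returns NULL if the key already
 * exists, unless HASH_UPDATE_INDIRECT is also set and the existing slot is an
 * IS_INDIRECT pointing at an IS_UNDEF zval; HASH_UPDATE destroys the old value
 * and overwrites it; HASH_ADD_NEW skips the existence check entirely, so the
 * caller must guarantee the key is not already present. */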
613 
614 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
615 {
616 	return _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
617 }
618 
619 ZEND_API zval* ZEND_FASTCALL _zend_hash_add(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
620 {
621 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
622 }
623 
624 ZEND_API zval* ZEND_FASTCALL _zend_hash_update(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
625 {
626 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
627 }
628 
629 ZEND_API zval* ZEND_FASTCALL _zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
630 {
631 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
632 }
633 
634 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
635 {
636 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
637 }
638 
639 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
640 {
641 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
642 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
643 	zend_string_release(key);
644 	return ret;
645 }
646 
647 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
648 {
649 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
650 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
651 	zend_string_release(key);
652 	return ret;
653 }
654 
655 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
656 {
657 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
658 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
659 	zend_string_release(key);
660 	return ret;
661 }
662 
663 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
664 {
665 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
666 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
667 	zend_string_release(key);
668 	return ret;
669 }
670 
671 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
672 {
673 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
674 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
675 	zend_string_delref(key);
676 	return ret;
677 }
678 
679 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
680 {
681 	zval dummy;
682 
683 	ZVAL_NULL(&dummy);
684 	return zend_hash_index_add(ht, h, &dummy);
685 }
686 
687 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
688 {
689 	zval dummy;
690 
691 	ZVAL_NULL(&dummy);
692 	return zend_hash_add(ht, key, &dummy);
693 }
694 
695 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
696 {
697 	zval dummy;
698 
699 	ZVAL_NULL(&dummy);
700 	return zend_hash_str_add(ht, str, len, &dummy);
701 }
702 
703 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
704 {
705 	uint32_t nIndex;
706 	uint32_t idx;
707 	Bucket *p;
708 
709 	IS_CONSISTENT(ht);
710 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
711 
712 	if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
713 		CHECK_INIT(ht, h < ht->nTableSize);
714 		if (h < ht->nTableSize) {
715 			p = ht->arData + h;
716 			goto add_to_packed;
717 		}
718 		goto add_to_hash;
719 	} else if (ht->u.flags & HASH_FLAG_PACKED) {
720 		if (h < ht->nNumUsed) {
721 			p = ht->arData + h;
722 			if (Z_TYPE(p->val) != IS_UNDEF) {
723 				if (flag & HASH_ADD) {
724 					return NULL;
725 				}
726 				if (ht->pDestructor) {
727 					ht->pDestructor(&p->val);
728 				}
729 				ZVAL_COPY_VALUE(&p->val, pData);
730 				if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
731 					ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
732 				}
733 				return &p->val;
734 			} else { /* we have to keep the order :( */
735 				goto convert_to_hash;
736 			}
737 		} else if (EXPECTED(h < ht->nTableSize)) {
738 			p = ht->arData + h;
739 		} else if ((h >> 1) < ht->nTableSize &&
740 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
741 			zend_hash_packed_grow(ht);
742 			p = ht->arData + h;
743 		} else {
744 			goto convert_to_hash;
745 		}
746 
747 add_to_packed:
748 		/* incremental initialization of empty Buckets */
749 		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
750 			ht->nNumUsed = h + 1;
751 		} else if (h >= ht->nNumUsed) {
752 			if (h > ht->nNumUsed) {
753 				Bucket *q = ht->arData + ht->nNumUsed;
754 				while (q != p) {
755 					ZVAL_UNDEF(&q->val);
756 					q++;
757 				}
758 			}
759 			ht->nNumUsed = h + 1;
760 		}
761 		ht->nNumOfElements++;
762 		if (ht->nInternalPointer == HT_INVALID_IDX) {
763 			ht->nInternalPointer = h;
764 		}
765 		zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
766 		if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
767 			ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
768 		}
769 		p->h = h;
770 		p->key = NULL;
771 		ZVAL_COPY_VALUE(&p->val, pData);
772 
773 		return &p->val;
774 
775 convert_to_hash:
776 		zend_hash_packed_to_hash(ht);
777 	} else if ((flag & HASH_ADD_NEW) == 0) {
778 		p = zend_hash_index_find_bucket(ht, h);
779 		if (p) {
780 			if (flag & HASH_ADD) {
781 				return NULL;
782 			}
783 			ZEND_ASSERT(&p->val != pData);
784 			if (ht->pDestructor) {
785 				ht->pDestructor(&p->val);
786 			}
787 			ZVAL_COPY_VALUE(&p->val, pData);
788 			if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
789 				ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
790 			}
791 			return &p->val;
792 		}
793 	}
794 
795 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
796 
797 add_to_hash:
798 	idx = ht->nNumUsed++;
799 	ht->nNumOfElements++;
800 	if (ht->nInternalPointer == HT_INVALID_IDX) {
801 		ht->nInternalPointer = idx;
802 	}
803 	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
804 	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
805 		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
806 	}
807 	p = ht->arData + idx;
808 	p->h = h;
809 	p->key = NULL;
810 	nIndex = h | ht->nTableMask;
811 	ZVAL_COPY_VALUE(&p->val, pData);
812 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
813 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
814 
815 	return &p->val;
816 }
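/* Packed-array note: while the table stays packed, the value for integer key h
 * is stored directly at arData[h]. The branch above tolerates a modest
 * overshoot (it grows the packed array only while h >> 1 < nTableSize and the
 * table is more than half full); anything sparser converts the table to a real
 * hash to avoid large holes. */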
817 
818 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
819 {
820 	return _zend_hash_index_add_or_update_i(ht, h, pData, flag ZEND_FILE_LINE_RELAY_CC);
821 }
822 
823 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
824 {
825 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
826 }
827 
828 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
829 {
830 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
831 }
832 
833 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
834 {
835 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
836 }
837 
838 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
839 {
840 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
841 }
842 
843 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
844 {
845 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
846 }
847 
848 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
849 {
850 
851 	IS_CONSISTENT(ht);
852 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
853 
854 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
855 		zend_hash_rehash(ht);
856 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
857 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
858 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
859 		Bucket *old_buckets = ht->arData;
860 
861 		new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
862 		ht->nTableSize = nSize;
863 		ht->nTableMask = -ht->nTableSize;
864 		HT_SET_DATA_ADDR(ht, new_data);
865 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
866 		pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
867 		zend_hash_rehash(ht);
868 	} else {
869 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
870 	}
871 }
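/* Resize policy (summary): if the number of IS_UNDEF holes exceeds 1/32 of the
 * live elements (nNumUsed > nNumOfElements + nNumOfElements/32), the table is
 * compacted in place via zend_hash_rehash(); otherwise its size is doubled and
 * every bucket is rehashed into the new slot area. */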
872 
873 ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
874 {
875 	Bucket *p;
876 	uint32_t nIndex, i;
877 
878 	IS_CONSISTENT(ht);
879 
880 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
881 		if (ht->u.flags & HASH_FLAG_INITIALIZED) {
882 			ht->nNumUsed = 0;
883 			HT_HASH_RESET(ht);
884 		}
885 		return SUCCESS;
886 	}
887 
888 	HT_HASH_RESET(ht);
889 	i = 0;
890 	p = ht->arData;
891 	if (HT_IS_WITHOUT_HOLES(ht)) {
892 		do {
893 			nIndex = p->h | ht->nTableMask;
894 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
895 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
896 			p++;
897 		} while (++i < ht->nNumUsed);
898 	} else {
899 		do {
900 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
901 				uint32_t j = i;
902 				Bucket *q = p;
903 
904 				if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
905 					while (++i < ht->nNumUsed) {
906 						p++;
907 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
908 							ZVAL_COPY_VALUE(&q->val, &p->val);
909 							q->h = p->h;
910 							nIndex = q->h | ht->nTableMask;
911 							q->key = p->key;
912 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
913 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
914 							if (UNEXPECTED(ht->nInternalPointer == i)) {
915 								ht->nInternalPointer = j;
916 							}
917 							q++;
918 							j++;
919 						}
920 					}
921 				} else {
922 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
923 
924 					while (++i < ht->nNumUsed) {
925 						p++;
926 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
927 							ZVAL_COPY_VALUE(&q->val, &p->val);
928 							q->h = p->h;
929 							nIndex = q->h | ht->nTableMask;
930 							q->key = p->key;
931 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
932 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
933 							if (UNEXPECTED(ht->nInternalPointer == i)) {
934 								ht->nInternalPointer = j;
935 							}
936 							if (UNEXPECTED(i == iter_pos)) {
937 								zend_hash_iterators_update(ht, i, j);
938 								iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
939 							}
940 							q++;
941 							j++;
942 						}
943 					}
944 				}
945 				ht->nNumUsed = j;
946 				break;
947 			}
948 			nIndex = p->h | ht->nTableMask;
949 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
950 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
951 			p++;
952 		} while (++i < ht->nNumUsed);
953 	}
954 	return SUCCESS;
955 }
956 
957 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
958 {
959 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
960 		if (prev) {
961 			Z_NEXT(prev->val) = Z_NEXT(p->val);
962 		} else {
963 			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
964 		}
965 	}
966 	if (HT_IDX_TO_HASH(ht->nNumUsed - 1) == idx) {
967 		do {
968 			ht->nNumUsed--;
969 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
970 	}
971 	ht->nNumOfElements--;
972 	if (HT_IDX_TO_HASH(ht->nInternalPointer) == idx || UNEXPECTED(ht->u.v.nIteratorsCount)) {
973 		uint32_t new_idx;
974 
975 		new_idx = idx = HT_HASH_TO_IDX(idx);
976 		while (1) {
977 			new_idx++;
978 			if (new_idx >= ht->nNumUsed) {
979 				new_idx = HT_INVALID_IDX;
980 				break;
981 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
982 				break;
983 			}
984 		}
985 		if (ht->nInternalPointer == idx) {
986 			ht->nInternalPointer = new_idx;
987 		}
988 		zend_hash_iterators_update(ht, idx, new_idx);
989 	}
990 	if (p->key) {
991 		zend_string_release(p->key);
992 	}
993 	if (ht->pDestructor) {
994 		zval tmp;
995 		ZVAL_COPY_VALUE(&tmp, &p->val);
996 		ZVAL_UNDEF(&p->val);
997 		ht->pDestructor(&tmp);
998 	} else {
999 		ZVAL_UNDEF(&p->val);
1000 	}
1001 }
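/* Deletion note: removing an element unlinks it from its collision chain and
 * marks the slot IS_UNDEF, leaving a hole; nNumUsed only shrinks when the
 * trailing slots are holes. The destructor runs on a stack copy after the slot
 * has already been set to IS_UNDEF, so re-entrant access during the destructor
 * sees a consistent table. */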
1002 
1003 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1004 {
1005 	Bucket *prev = NULL;
1006 
1007 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1008 		uint32_t nIndex = p->h | ht->nTableMask;
1009 		uint32_t i = HT_HASH(ht, nIndex);
1010 
1011 		if (i != idx) {
1012 			prev = HT_HASH_TO_BUCKET(ht, i);
1013 			while (Z_NEXT(prev->val) != idx) {
1014 				i = Z_NEXT(prev->val);
1015 				prev = HT_HASH_TO_BUCKET(ht, i);
1016 			}
1017 		}
1018 	}
1019 
1020 	_zend_hash_del_el_ex(ht, idx, p, prev);
1021 }
1022 
1023 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1024 {
1025 	IS_CONSISTENT(ht);
1026 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1027 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1028 }
1029 
1030 ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1031 {
1032 	zend_ulong h;
1033 	uint32_t nIndex;
1034 	uint32_t idx;
1035 	Bucket *p;
1036 	Bucket *prev = NULL;
1037 
1038 	IS_CONSISTENT(ht);
1039 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1040 
1041 	h = zend_string_hash_val(key);
1042 	nIndex = h | ht->nTableMask;
1043 
1044 	idx = HT_HASH(ht, nIndex);
1045 	while (idx != HT_INVALID_IDX) {
1046 		p = HT_HASH_TO_BUCKET(ht, idx);
1047 		if ((p->key == key) ||
1048 			(p->h == h &&
1049 		     p->key &&
1050 		     ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1051 		     memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1052 			_zend_hash_del_el_ex(ht, idx, p, prev);
1053 			return SUCCESS;
1054 		}
1055 		prev = p;
1056 		idx = Z_NEXT(p->val);
1057 	}
1058 	return FAILURE;
1059 }
1060 
1061 ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1062 {
1063 	zend_ulong h;
1064 	uint32_t nIndex;
1065 	uint32_t idx;
1066 	Bucket *p;
1067 	Bucket *prev = NULL;
1068 
1069 	IS_CONSISTENT(ht);
1070 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1071 
1072 	h = zend_string_hash_val(key);
1073 	nIndex = h | ht->nTableMask;
1074 
1075 	idx = HT_HASH(ht, nIndex);
1076 	while (idx != HT_INVALID_IDX) {
1077 		p = HT_HASH_TO_BUCKET(ht, idx);
1078 		if ((p->key == key) ||
1079 			(p->h == h &&
1080 		     p->key &&
1081 		     ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1082 		     memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1083 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1084 				zval *data = Z_INDIRECT(p->val);
1085 
1086 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1087 					return FAILURE;
1088 				} else {
1089 					if (ht->pDestructor) {
1090 						zval tmp;
1091 						ZVAL_COPY_VALUE(&tmp, data);
1092 						ZVAL_UNDEF(data);
1093 						ht->pDestructor(&tmp);
1094 					} else {
1095 						ZVAL_UNDEF(data);
1096 					}
1097 					ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1098 				}
1099 			} else {
1100 				_zend_hash_del_el_ex(ht, idx, p, prev);
1101 			}
1102 			return SUCCESS;
1103 		}
1104 		prev = p;
1105 		idx = Z_NEXT(p->val);
1106 	}
1107 	return FAILURE;
1108 }
1109 
1110 ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1111 {
1112 	zend_ulong h;
1113 	uint32_t nIndex;
1114 	uint32_t idx;
1115 	Bucket *p;
1116 	Bucket *prev = NULL;
1117 
1118 	IS_CONSISTENT(ht);
1119 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1120 
1121 	h = zend_inline_hash_func(str, len);
1122 	nIndex = h | ht->nTableMask;
1123 
1124 	idx = HT_HASH(ht, nIndex);
1125 	while (idx != HT_INVALID_IDX) {
1126 		p = HT_HASH_TO_BUCKET(ht, idx);
1127 		if ((p->h == h)
1128 			 && p->key
1129 			 && (ZSTR_LEN(p->key) == len)
1130 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1131 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1132 				zval *data = Z_INDIRECT(p->val);
1133 
1134 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1135 					return FAILURE;
1136 				} else {
1137 					if (ht->pDestructor) {
1138 						ht->pDestructor(data);
1139 					}
1140 					ZVAL_UNDEF(data);
1141 					ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1142 				}
1143 			} else {
1144 				_zend_hash_del_el_ex(ht, idx, p, prev);
1145 			}
1146 			return SUCCESS;
1147 		}
1148 		prev = p;
1149 		idx = Z_NEXT(p->val);
1150 	}
1151 	return FAILURE;
1152 }
1153 
1154 ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1155 {
1156 	zend_ulong h;
1157 	uint32_t nIndex;
1158 	uint32_t idx;
1159 	Bucket *p;
1160 	Bucket *prev = NULL;
1161 
1162 	IS_CONSISTENT(ht);
1163 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1164 
1165 	h = zend_inline_hash_func(str, len);
1166 	nIndex = h | ht->nTableMask;
1167 
1168 	idx = HT_HASH(ht, nIndex);
1169 	while (idx != HT_INVALID_IDX) {
1170 		p = HT_HASH_TO_BUCKET(ht, idx);
1171 		if ((p->h == h)
1172 			 && p->key
1173 			 && (ZSTR_LEN(p->key) == len)
1174 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1175 			_zend_hash_del_el_ex(ht, idx, p, prev);
1176 			return SUCCESS;
1177 		}
1178 		prev = p;
1179 		idx = Z_NEXT(p->val);
1180 	}
1181 	return FAILURE;
1182 }
1183 
1184 ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1185 {
1186 	uint32_t nIndex;
1187 	uint32_t idx;
1188 	Bucket *p;
1189 	Bucket *prev = NULL;
1190 
1191 	IS_CONSISTENT(ht);
1192 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1193 
1194 	if (ht->u.flags & HASH_FLAG_PACKED) {
1195 		if (h < ht->nNumUsed) {
1196 			p = ht->arData + h;
1197 			if (Z_TYPE(p->val) != IS_UNDEF) {
1198 				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1199 				return SUCCESS;
1200 			}
1201 		}
1202 		return FAILURE;
1203 	}
1204 	nIndex = h | ht->nTableMask;
1205 
1206 	idx = HT_HASH(ht, nIndex);
1207 	while (idx != HT_INVALID_IDX) {
1208 		p = HT_HASH_TO_BUCKET(ht, idx);
1209 		if ((p->h == h) && (p->key == NULL)) {
1210 			_zend_hash_del_el_ex(ht, idx, p, prev);
1211 			return SUCCESS;
1212 		}
1213 		prev = p;
1214 		idx = Z_NEXT(p->val);
1215 	}
1216 	return FAILURE;
1217 }
1218 
1219 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1220 {
1221 	Bucket *p, *end;
1222 
1223 	IS_CONSISTENT(ht);
1224 	HT_ASSERT(GC_REFCOUNT(ht) <= 1);
1225 
1226 	if (ht->nNumUsed) {
1227 		p = ht->arData;
1228 		end = p + ht->nNumUsed;
1229 		if (ht->pDestructor) {
1230 			SET_INCONSISTENT(HT_IS_DESTROYING);
1231 
1232 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1233 				if (HT_IS_WITHOUT_HOLES(ht)) {
1234 					do {
1235 						ht->pDestructor(&p->val);
1236 					} while (++p != end);
1237 				} else {
1238 					do {
1239 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1240 							ht->pDestructor(&p->val);
1241 						}
1242 					} while (++p != end);
1243 				}
1244 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1245 				do {
1246 					ht->pDestructor(&p->val);
1247 					if (EXPECTED(p->key)) {
1248 						zend_string_release(p->key);
1249 					}
1250 				} while (++p != end);
1251 			} else {
1252 				do {
1253 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1254 						ht->pDestructor(&p->val);
1255 						if (EXPECTED(p->key)) {
1256 							zend_string_release(p->key);
1257 						}
1258 					}
1259 				} while (++p != end);
1260 			}
1261 
1262 			SET_INCONSISTENT(HT_DESTROYED);
1263 		} else {
1264 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1265 				do {
1266 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1267 						if (EXPECTED(p->key)) {
1268 							zend_string_release(p->key);
1269 						}
1270 					}
1271 				} while (++p != end);
1272 			}
1273 		}
1274 		zend_hash_iterators_remove(ht);
1275 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1276 		return;
1277 	}
1278 	pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1279 }
1280 
1281 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1282 {
1283 	Bucket *p, *end;
1284 
1285 	IS_CONSISTENT(ht);
1286 	HT_ASSERT(GC_REFCOUNT(ht) <= 1);
1287 
1288 	/* break possible cycles */
1289 	GC_REMOVE_FROM_BUFFER(ht);
1290 	GC_TYPE_INFO(ht) = IS_NULL | (GC_WHITE << 16);
1291 
1292 	if (ht->nNumUsed) {
1293 		/* In some rare cases destructors of regular arrays may be changed */
1294 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1295 			zend_hash_destroy(ht);
1296 			goto free_ht;
1297 		}
1298 
1299 		p = ht->arData;
1300 		end = p + ht->nNumUsed;
1301 		SET_INCONSISTENT(HT_IS_DESTROYING);
1302 
1303 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1304 			do {
1305 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1306 			} while (++p != end);
1307 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1308 			do {
1309 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1310 				if (EXPECTED(p->key)) {
1311 					zend_string_release(p->key);
1312 				}
1313 			} while (++p != end);
1314 		} else {
1315 			do {
1316 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1317 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1318 					if (EXPECTED(p->key)) {
1319 						zend_string_release(p->key);
1320 					}
1321 				}
1322 			} while (++p != end);
1323 		}
1324 		zend_hash_iterators_remove(ht);
1325 		SET_INCONSISTENT(HT_DESTROYED);
1326 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1327 		goto free_ht;
1328 	}
1329 	efree(HT_GET_DATA_ADDR(ht));
1330 free_ht:
1331 	FREE_HASHTABLE(ht);
1332 }
1333 
1334 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1335 {
1336 	Bucket *p, *end;
1337 
1338 	IS_CONSISTENT(ht);
1339 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1340 
1341 	if (ht->nNumUsed) {
1342 		p = ht->arData;
1343 		end = p + ht->nNumUsed;
1344 		if (ht->pDestructor) {
1345 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1346 				if (HT_IS_WITHOUT_HOLES(ht)) {
1347 					do {
1348 						ht->pDestructor(&p->val);
1349 					} while (++p != end);
1350 				} else {
1351 					do {
1352 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1353 							ht->pDestructor(&p->val);
1354 						}
1355 					} while (++p != end);
1356 				}
1357 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1358 				do {
1359 					ht->pDestructor(&p->val);
1360 					if (EXPECTED(p->key)) {
1361 						zend_string_release(p->key);
1362 					}
1363 				} while (++p != end);
1364 			} else {
1365 				do {
1366 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1367 						ht->pDestructor(&p->val);
1368 						if (EXPECTED(p->key)) {
1369 							zend_string_release(p->key);
1370 						}
1371 					}
1372 				} while (++p != end);
1373 			}
1374 		} else {
1375 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1376 				if (HT_IS_WITHOUT_HOLES(ht)) {
1377 					do {
1378 						if (EXPECTED(p->key)) {
1379 							zend_string_release(p->key);
1380 						}
1381 					} while (++p != end);
1382 				} else {
1383 					do {
1384 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1385 							if (EXPECTED(p->key)) {
1386 								zend_string_release(p->key);
1387 							}
1388 						}
1389 					} while (++p != end);
1390 				}
1391 			}
1392 		}
1393 		if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1394 			HT_HASH_RESET(ht);
1395 		}
1396 	}
1397 	ht->nNumUsed = 0;
1398 	ht->nNumOfElements = 0;
1399 	ht->nNextFreeElement = 0;
1400 	ht->nInternalPointer = HT_INVALID_IDX;
1401 }
1402 
1403 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1404 {
1405 	Bucket *p, *end;
1406 
1407 	IS_CONSISTENT(ht);
1408 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1409 
1410 	if (ht->nNumUsed) {
1411 		p = ht->arData;
1412 		end = p + ht->nNumUsed;
1413 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1414 			do {
1415 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1416 			} while (++p != end);
1417 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1418 			do {
1419 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1420 				if (EXPECTED(p->key)) {
1421 					zend_string_release(p->key);
1422 				}
1423 			} while (++p != end);
1424 		} else {
1425 			do {
1426 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1427 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1428 					if (EXPECTED(p->key)) {
1429 						zend_string_release(p->key);
1430 					}
1431 				}
1432 			} while (++p != end);
1433 		}
1434 		HT_HASH_RESET(ht);
1435 	}
1436 	ht->nNumUsed = 0;
1437 	ht->nNumOfElements = 0;
1438 	ht->nNextFreeElement = 0;
1439 	ht->nInternalPointer = HT_INVALID_IDX;
1440 }
1441 
1442 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1443 {
1444 	uint32_t idx;
1445 	Bucket *p;
1446 
1447 	IS_CONSISTENT(ht);
1448 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1449 
1450 	p = ht->arData;
1451 	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1452 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1453 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1454 	}
1455 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1456 		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1457 	}
1458 
1459 	SET_INCONSISTENT(HT_DESTROYED);
1460 }
1461 
1462 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1463 {
1464 	uint32_t idx;
1465 	Bucket *p;
1466 
1467 	IS_CONSISTENT(ht);
1468 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1469 
1470 	idx = ht->nNumUsed;
1471 	p = ht->arData + ht->nNumUsed;
1472 	while (idx > 0) {
1473 		idx--;
1474 		p--;
1475 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1476 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1477 	}
1478 
1479 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1480 		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1481 	}
1482 
1483 	SET_INCONSISTENT(HT_DESTROYED);
1484 }
1485 
1486 /* This is used to iterate over the elements of a hashtable and selectively
1487  * delete certain entries. apply_func() receives the data and decides whether
1488  * the entry should be deleted or whether iteration should be stopped. The
1489  * following three return codes are possible:
1490  * ZEND_HASH_APPLY_KEEP   - continue
1491  * ZEND_HASH_APPLY_STOP   - stop iteration
1492  * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
1493  */
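/* Illustrative callback (hypothetical name, not part of this file), assuming
 * the apply_func_t signature int (*)(zval *pDest):
 *
 *   static int remove_null_entries(zval *val)
 *   {
 *       return Z_TYPE_P(val) == IS_NULL ? ZEND_HASH_APPLY_REMOVE
 *                                       : ZEND_HASH_APPLY_KEEP;
 *   }
 *
 *   // zend_hash_apply(ht, remove_null_entries);
 */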
1494 
1495 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1496 {
1497 	uint32_t idx;
1498 	Bucket *p;
1499 	int result;
1500 
1501 	IS_CONSISTENT(ht);
1502 
1503 	HASH_PROTECT_RECURSION(ht);
1504 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1505 		p = ht->arData + idx;
1506 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1507 		result = apply_func(&p->val);
1508 
1509 		if (result & ZEND_HASH_APPLY_REMOVE) {
1510 			HT_ASSERT(GC_REFCOUNT(ht) == 1);
1511 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1512 		}
1513 		if (result & ZEND_HASH_APPLY_STOP) {
1514 			break;
1515 		}
1516 	}
1517 	HASH_UNPROTECT_RECURSION(ht);
1518 }
1519 
1520 
1521 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1522 {
1523     uint32_t idx;
1524 	Bucket *p;
1525 	int result;
1526 
1527 	IS_CONSISTENT(ht);
1528 
1529 	HASH_PROTECT_RECURSION(ht);
1530 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1531 		p = ht->arData + idx;
1532 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1533 		result = apply_func(&p->val, argument);
1534 
1535 		if (result & ZEND_HASH_APPLY_REMOVE) {
1536 			HT_ASSERT(GC_REFCOUNT(ht) == 1);
1537 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1538 		}
1539 		if (result & ZEND_HASH_APPLY_STOP) {
1540 			break;
1541 		}
1542 	}
1543 	HASH_UNPROTECT_RECURSION(ht);
1544 }
1545 
1546 
1547 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1548 {
1549 	uint32_t idx;
1550 	Bucket *p;
1551 	va_list args;
1552 	zend_hash_key hash_key;
1553 	int result;
1554 
1555 	IS_CONSISTENT(ht);
1556 
1557 	HASH_PROTECT_RECURSION(ht);
1558 
1559 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1560 		p = ht->arData + idx;
1561 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1562 		va_start(args, num_args);
1563 		hash_key.h = p->h;
1564 		hash_key.key = p->key;
1565 
1566 		result = apply_func(&p->val, num_args, args, &hash_key);
1567 
1568 		if (result & ZEND_HASH_APPLY_REMOVE) {
1569 			HT_ASSERT(GC_REFCOUNT(ht) == 1);
1570 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1571 		}
1572 		if (result & ZEND_HASH_APPLY_STOP) {
1573 			va_end(args);
1574 			break;
1575 		}
1576 		va_end(args);
1577 	}
1578 
1579 	HASH_UNPROTECT_RECURSION(ht);
1580 }
1581 
1582 
1583 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1584 {
1585 	uint32_t idx;
1586 	Bucket *p;
1587 	int result;
1588 
1589 	IS_CONSISTENT(ht);
1590 
1591 	HASH_PROTECT_RECURSION(ht);
1592 	idx = ht->nNumUsed;
1593 	while (idx > 0) {
1594 		idx--;
1595 		p = ht->arData + idx;
1596 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1597 
1598 		result = apply_func(&p->val);
1599 
1600 		if (result & ZEND_HASH_APPLY_REMOVE) {
1601 			HT_ASSERT(GC_REFCOUNT(ht) == 1);
1602 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1603 		}
1604 		if (result & ZEND_HASH_APPLY_STOP) {
1605 			break;
1606 		}
1607 	}
1608 	HASH_UNPROTECT_RECURSION(ht);
1609 }
1610 
1611 
1612 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1613 {
1614     uint32_t idx;
1615 	Bucket *p;
1616 	zval *new_entry, *data;
1617 	zend_bool setTargetPointer;
1618 
1619 	IS_CONSISTENT(source);
1620 	IS_CONSISTENT(target);
1621 	HT_ASSERT(GC_REFCOUNT(target) == 1);
1622 
1623 	setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
1624 	for (idx = 0; idx < source->nNumUsed; idx++) {
1625 		p = source->arData + idx;
1626 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1627 
1628 		if (setTargetPointer && source->nInternalPointer == idx) {
1629 			target->nInternalPointer = HT_INVALID_IDX;
1630 		}
1631 		/* INDIRECT element may point to UNDEF-ined slots */
1632 		data = &p->val;
1633 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1634 			data = Z_INDIRECT_P(data);
1635 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1636 				continue;
1637 			}
1638 		}
1639 		if (p->key) {
1640 			new_entry = zend_hash_update(target, p->key, data);
1641 		} else {
1642 			new_entry = zend_hash_index_update(target, p->h, data);
1643 		}
1644 		if (pCopyConstructor) {
1645 			pCopyConstructor(new_entry);
1646 		}
1647 	}
1648 	if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
1649 		idx = 0;
1650 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1651 			idx++;
1652 		}
1653 		target->nInternalPointer = idx;
1654 	}
1655 }
1656 
1657 
1658 static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
1659 {
1660 	zval *data = &p->val;
1661 
1662 	if (with_holes) {
1663 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1664 			data = Z_INDIRECT_P(data);
1665 		}
1666 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1667 			return 0;
1668 		}
1669 	} else if (!packed) {
1670 		/* INDIRECT element may point to UNDEF-ined slots */
1671 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1672 			data = Z_INDIRECT_P(data);
1673 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1674 				return 0;
1675 			}
1676 		}
1677 	}
1678 
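	/* If the value is a reference held only by this slot and it does not
	 * point back at the source array itself, copy the referenced value
	 * directly (flattening the reference); otherwise share the value and
	 * bump its refcount. */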
1679 	do {
1680 		if (Z_OPT_REFCOUNTED_P(data)) {
1681 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
1682 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
1683 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
1684 				data = Z_REFVAL_P(data);
1685 				if (!Z_OPT_REFCOUNTED_P(data)) {
1686 					break;
1687 				}
1688 			}
1689 			Z_ADDREF_P(data);
1690 		}
1691 	} while (0);
1692 	ZVAL_COPY_VALUE(&q->val, data);
1693 
1694 	q->h = p->h;
1695 	if (packed) {
1696 		q->key = NULL;
1697 	} else {
1698 		uint32_t nIndex;
1699 
1700 		q->key = p->key;
1701 		if (!static_keys && q->key) {
1702 			zend_string_addref(q->key);
1703 		}
1704 
1705 		nIndex = q->h | target->nTableMask;
1706 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
1707 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
1708 	}
1709 	return 1;
1710 }
1711 
1712 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
1713 {
1714 	Bucket *p = source->arData;
1715 	Bucket *q = target->arData;
1716 	Bucket *end = p + source->nNumUsed;
1717 
1718 	do {
1719 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
1720 			if (with_holes) {
1721 				ZVAL_UNDEF(&q->val);
1722 			}
1723 		}
1724 		p++; q++;
1725 	} while (p != end);
1726 }
1727 
1728 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
1729 {
1730 	uint32_t idx = 0;
1731 	Bucket *p = source->arData;
1732 	Bucket *q = target->arData;
1733 	Bucket *end = p + source->nNumUsed;
1734 
1735 	do {
1736 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
1737 			uint32_t target_idx = idx;
1738 
1739 			idx++; p++;
1740 			while (p != end) {
1741 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
1742 					if (source->nInternalPointer == idx) {
1743 						target->nInternalPointer = target_idx;
1744 					}
1745 					target_idx++; q++;
1746 				}
1747 				idx++; p++;
1748 			}
1749 			return target_idx;
1750 		}
1751 		idx++; p++; q++;
1752 	} while (p != end);
1753 	return idx;
1754 }
1755 
1756 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
1757 {
1758 	uint32_t idx;
1759 	HashTable *target;
1760 
1761 	IS_CONSISTENT(source);
1762 
1763 	ALLOC_HASHTABLE(target);
1764 	GC_REFCOUNT(target) = 1;
1765 	GC_TYPE_INFO(target) = IS_ARRAY;
1766 
1767 	target->nTableSize = source->nTableSize;
1768 	target->pDestructor = ZVAL_PTR_DTOR;
1769 
1770 	if (source->nNumUsed == 0) {
1771 		target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
1772 		target->nTableMask = HT_MIN_MASK;
1773 		target->nNumUsed = 0;
1774 		target->nNumOfElements = 0;
1775 		target->nNextFreeElement = 0;
1776 		target->nInternalPointer = HT_INVALID_IDX;
1777 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
1778 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
1779 		target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
1780 		target->nTableMask = source->nTableMask;
1781 		target->nNumUsed = source->nNumUsed;
1782 		target->nNumOfElements = source->nNumOfElements;
1783 		target->nNextFreeElement = source->nNextFreeElement;
1784 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1785 		target->nInternalPointer = source->nInternalPointer;
1786 		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
1787 		if (target->nNumOfElements > 0 &&
1788 		    target->nInternalPointer == HT_INVALID_IDX) {
1789 			idx = 0;
1790 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1791 				idx++;
1792 			}
1793 			target->nInternalPointer = idx;
1794 		}
1795 	} else if (source->u.flags & HASH_FLAG_PACKED) {
1796 		target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
1797 		target->nTableMask = source->nTableMask;
1798 		target->nNumUsed = source->nNumUsed;
1799 		target->nNumOfElements = source->nNumOfElements;
1800 		target->nNextFreeElement = source->nNextFreeElement;
1801 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1802 		target->nInternalPointer = source->nInternalPointer;
1803 		HT_HASH_RESET_PACKED(target);
1804 
1805 		if (HT_IS_WITHOUT_HOLES(target)) {
1806 			zend_array_dup_packed_elements(source, target, 0);
1807 		} else {
1808 			zend_array_dup_packed_elements(source, target, 1);
1809 		}
1810 		if (target->nNumOfElements > 0 &&
1811 		    target->nInternalPointer == HT_INVALID_IDX) {
1812 			idx = 0;
1813 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1814 				idx++;
1815 			}
1816 			target->nInternalPointer = idx;
1817 		}
1818 	} else {
1819 		target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
1820 		target->nTableMask = source->nTableMask;
1821 		target->nNextFreeElement = source->nNextFreeElement;
1822 		target->nInternalPointer = source->nInternalPointer;
1823 
1824 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1825 		HT_HASH_RESET(target);
1826 
1827 		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
1828 			if (HT_IS_WITHOUT_HOLES(source)) {
1829 				idx = zend_array_dup_elements(source, target, 1, 0);
1830 			} else {
1831 				idx = zend_array_dup_elements(source, target, 1, 1);
1832 			}
1833 		} else {
1834 			if (HT_IS_WITHOUT_HOLES(source)) {
1835 				idx = zend_array_dup_elements(source, target, 0, 0);
1836 			} else {
1837 				idx = zend_array_dup_elements(source, target, 0, 1);
1838 			}
1839 		}
1840 		target->nNumUsed = idx;
1841 		target->nNumOfElements = idx;
1842 		if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
1843 			target->nInternalPointer = 0;
1844 		}
1845 	}
1846 	return target;
1847 }
1848 
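/* Usage sketch (illustrative): zend_array_dup() is the usual way to obtain a
 * private, refcount-1 copy of an array before modifying it, e.g. for
 * copy-on-write separation.  `zv` below is a hypothetical IS_ARRAY zval.
 *
 *   HashTable *copy = zend_array_dup(Z_ARRVAL_P(zv));
 *   // modify `copy` freely; the original array is left untouched
 */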
1849 
1850 ZEND_API void ZEND_FASTCALL _zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite ZEND_FILE_LINE_DC)
1851 {
1852     uint32_t idx;
1853 	Bucket *p;
1854 	zval *t;
1855 
1856 	IS_CONSISTENT(source);
1857 	IS_CONSISTENT(target);
1858 	HT_ASSERT(GC_REFCOUNT(target) == 1);
1859 
1860 	if (overwrite) {
1861 		for (idx = 0; idx < source->nNumUsed; idx++) {
1862 			p = source->arData + idx;
1863 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1864 			if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1865 			    UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1866 			    continue;
1867 			}
1868 			if (p->key) {
1869 				t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1870 				if (t && pCopyConstructor) {
1871 					pCopyConstructor(t);
1872 				}
1873 			} else {
1874 				t = zend_hash_index_update(target, p->h, &p->val);
1875 				if (t && pCopyConstructor) {
1876 					pCopyConstructor(t);
1877 				}
1878 			}
1879 		}
1880 	} else {
1881 		for (idx = 0; idx < source->nNumUsed; idx++) {
1882 			p = source->arData + idx;
1883 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1884 			if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1885 			    UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1886 			    continue;
1887 			}
1888 			if (p->key) {
1889 				t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_ADD | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1890 				if (t && pCopyConstructor) {
1891 					pCopyConstructor(t);
1892 				}
1893 			} else {
1894 				t = zend_hash_index_add(target, p->h, &p->val);
1895 				if (t && pCopyConstructor) {
1896 					pCopyConstructor(t);
1897 				}
1898 			}
1899 		}
1900 	}
1901 	if (target->nNumOfElements > 0) {
1902 		idx = 0;
1903 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1904 			idx++;
1905 		}
1906 		target->nInternalPointer = idx;
1907 	}
1908 }
1909 
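/* Usage sketch (illustrative): callers normally go through the
 * zend_hash_merge() convenience macro, which supplies the file/line
 * bookkeeping arguments.  With overwrite == 0 existing entries in `target`
 * win; with overwrite != 0 entries from `source` replace them.
 *
 *   zend_hash_merge(target, source, zval_add_ref, 0);   // keep existing entries
 *   zend_hash_merge(target, source, zval_add_ref, 1);   // source overwrites target
 */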
1910 
1911 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
1912 {
1913 	zend_hash_key hash_key;
1914 
1915 	hash_key.h = p->h;
1916 	hash_key.key = p->key;
1917 	return merge_checker_func(target, source_data, &hash_key, pParam);
1918 }
1919 
1920 
1921 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
1922 {
1923 	uint32_t idx;
1924 	Bucket *p;
1925 	zval *t;
1926 
1927 	IS_CONSISTENT(source);
1928 	IS_CONSISTENT(target);
1929 	HT_ASSERT(GC_REFCOUNT(target) == 1);
1930 
1931 	for (idx = 0; idx < source->nNumUsed; idx++) {
1932 		p = source->arData + idx;
1933 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1934 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
1935 			t = zend_hash_update(target, p->key, &p->val);
1936 			if (t && pCopyConstructor) {
1937 				pCopyConstructor(t);
1938 			}
1939 		}
1940 	}
1941 	if (target->nNumOfElements > 0) {
1942 		idx = 0;
1943 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1944 			idx++;
1945 		}
1946 		target->nInternalPointer = idx;
1947 	}
1948 }
1949 
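/* Usage sketch (illustrative): the checker decides per element whether it is
 * merged.  Note that zend_hash_merge_ex() inserts via zend_hash_update(), so
 * it assumes string-keyed source entries.  The checker below is hypothetical.
 *
 *   static zend_bool merge_if_absent(HashTable *target, zval *source_data,
 *                                    zend_hash_key *hash_key, void *pParam)
 *   {
 *       return !zend_hash_exists(target, hash_key->key);
 *   }
 *
 *   zend_hash_merge_ex(target, source, zval_add_ref, merge_if_absent, NULL);
 */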
1950 
1951 /* Returns the hash table data if found and NULL if not. */
1952 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
1953 {
1954 	Bucket *p;
1955 
1956 	IS_CONSISTENT(ht);
1957 
1958 	p = zend_hash_find_bucket(ht, key);
1959 	return p ? &p->val : NULL;
1960 }
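
/* Usage sketch (illustrative): the returned pointer points into the table's
 * bucket storage, so it must not be cached across insertions that may
 * resize the table.
 *
 *   zend_string *key = zend_string_init("name", sizeof("name") - 1, 0);
 *   zval *val = zend_hash_find(ht, key);
 *   zend_string_release(key);
 *   if (val != NULL) {
 *       // use *val
 *   }
 */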
1961 
1962 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
1963 {
1964 	zend_ulong h;
1965 	Bucket *p;
1966 
1967 	IS_CONSISTENT(ht);
1968 
1969 	h = zend_inline_hash_func(str, len);
1970 	p = zend_hash_str_find_bucket(ht, str, len, h);
1971 	return p ? &p->val : NULL;
1972 }
1973 
1974 ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
1975 {
1976 	Bucket *p;
1977 
1978 	IS_CONSISTENT(ht);
1979 
1980 	p = zend_hash_find_bucket(ht, key);
1981 	return p ? 1 : 0;
1982 }
1983 
1984 ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
1985 {
1986 	zend_ulong h;
1987 	Bucket *p;
1988 
1989 	IS_CONSISTENT(ht);
1990 
1991 	h = zend_inline_hash_func(str, len);
1992 	p = zend_hash_str_find_bucket(ht, str, len, h);
1993 	return p ? 1 : 0;
1994 }
1995 
1996 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
1997 {
1998 	Bucket *p;
1999 
2000 	IS_CONSISTENT(ht);
2001 
2002 	if (ht->u.flags & HASH_FLAG_PACKED) {
2003 		if (h < ht->nNumUsed) {
2004 			p = ht->arData + h;
2005 			if (Z_TYPE(p->val) != IS_UNDEF) {
2006 				return &p->val;
2007 			}
2008 		}
2009 		return NULL;
2010 	}
2011 
2012 	p = zend_hash_index_find_bucket(ht, h);
2013 	return p ? &p->val : NULL;
2014 }
2015 
2016 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2017 {
2018 	Bucket *p;
2019 
2020 	IS_CONSISTENT(ht);
2021 
2022 	p = zend_hash_index_find_bucket(ht, h);
2023 	return p ? &p->val : NULL;
2024 }
2025 
2026 ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
2027 {
2028 	Bucket *p;
2029 
2030 	IS_CONSISTENT(ht);
2031 
2032 	if (ht->u.flags & HASH_FLAG_PACKED) {
2033 		if (h < ht->nNumUsed) {
2034 			if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
2035 				return 1;
2036 			}
2037 		}
2038 		return 0;
2039 	}
2040 
2041 	p = zend_hash_index_find_bucket(ht, h);
2042 	return p ? 1 : 0;
2043 }
2044 
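/* Usage sketch (illustrative): for packed arrays the integer key is used
 * directly as an offset into arData, so these lookups bypass the hash part
 * entirely.
 *
 *   zval *val = zend_hash_index_find(ht, 3);      // element with key 3, or NULL
 *   if (zend_hash_index_exists(ht, 3)) {
 *       // key 3 is present
 *   }
 */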
2045 
2046 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2047 {
2048     uint32_t idx;
2049 
2050 	IS_CONSISTENT(ht);
2051 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2052 
2053 	for (idx = 0; idx < ht->nNumUsed; idx++) {
2054 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2055 			*pos = idx;
2056 			return;
2057 		}
2058 	}
2059 	*pos = HT_INVALID_IDX;
2060 }
2061 
2062 
2063 /* This function could be optimized considerably by remembering
2064  * the end of the list.
2065  */
2066 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2067 {
2068 	uint32_t idx;
2069 
2070 	IS_CONSISTENT(ht);
2071 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2072 
2073 	idx = ht->nNumUsed;
2074 	while (idx > 0) {
2075 		idx--;
2076 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2077 			*pos = idx;
2078 			return;
2079 		}
2080 	}
2081 	*pos = HT_INVALID_IDX;
2082 }
2083 
2084 
2085 ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2086 {
2087 	uint32_t idx = *pos;
2088 
2089 	IS_CONSISTENT(ht);
2090 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2091 
2092 	if (idx != HT_INVALID_IDX) {
2093 		while (1) {
2094 			idx++;
2095 			if (idx >= ht->nNumUsed) {
2096 				*pos = HT_INVALID_IDX;
2097 				return SUCCESS;
2098 			}
2099 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2100 				*pos = idx;
2101 				return SUCCESS;
2102 			}
2103 		}
2104 	} else {
2105  		return FAILURE;
2106 	}
2107 }
2108 
2109 ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2110 {
2111 	uint32_t idx = *pos;
2112 
2113 	IS_CONSISTENT(ht);
2114 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2115 
2116 	if (idx != HT_INVALID_IDX) {
2117 		while (idx > 0) {
2118 			idx--;
2119 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2120 				*pos = idx;
2121 				return SUCCESS;
2122 			}
2123 		}
2124 		*pos = HT_INVALID_IDX;
2125  		return SUCCESS;
2126 	} else {
2127  		return FAILURE;
2128 	}
2129 }
2130 
2131 
2132 /* This function should be made binary safe. */
2133 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2134 {
2135 	uint32_t idx = *pos;
2136 	Bucket *p;
2137 
2138 	IS_CONSISTENT(ht);
2139 	if (idx != HT_INVALID_IDX) {
2140 		p = ht->arData + idx;
2141 		if (p->key) {
2142 			*str_index = p->key;
2143 			return HASH_KEY_IS_STRING;
2144 		} else {
2145 			*num_index = p->h;
2146 			return HASH_KEY_IS_LONG;
2147 		}
2148 	}
2149 	return HASH_KEY_NON_EXISTENT;
2150 }
2151 
2152 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2153 {
2154 	uint32_t idx = *pos;
2155 	Bucket *p;
2156 
2157 	IS_CONSISTENT(ht);
2158 	if (idx == HT_INVALID_IDX) {
2159 		ZVAL_NULL(key);
2160 	} else {
2161 		p = ht->arData + idx;
2162 		if (p->key) {
2163 			ZVAL_STR_COPY(key, p->key);
2164 		} else {
2165 			ZVAL_LONG(key, p->h);
2166 		}
2167 	}
2168 }
2169 
2170 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2171 {
2172     uint32_t idx = *pos;
2173 	Bucket *p;
2174 
2175 	IS_CONSISTENT(ht);
2176 	if (idx != HT_INVALID_IDX) {
2177 		p = ht->arData + idx;
2178 		if (p->key) {
2179 			return HASH_KEY_IS_STRING;
2180 		} else {
2181 			return HASH_KEY_IS_LONG;
2182 		}
2183 	}
2184 	return HASH_KEY_NON_EXISTENT;
2185 }
2186 
2187 
2188 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2189 {
2190 	uint32_t idx = *pos;
2191 	Bucket *p;
2192 
2193 	IS_CONSISTENT(ht);
2194 	if (idx != HT_INVALID_IDX) {
2195 		p = ht->arData + idx;
2196 		return &p->val;
2197 	} else {
2198 		return NULL;
2199 	}
2200 }
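
/* Usage sketch (illustrative): manual iteration with an external position,
 * comparable to what the ZEND_HASH_FOREACH_* convenience macros provide.
 *
 *   HashPosition pos;
 *   zval *val;
 *   zend_string *str_key;
 *   zend_ulong num_key;
 *
 *   zend_hash_internal_pointer_reset_ex(ht, &pos);
 *   while ((val = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
 *       if (zend_hash_get_current_key_ex(ht, &str_key, &num_key, &pos) == HASH_KEY_IS_STRING) {
 *           // string key available in str_key
 *       } else {
 *           // integer key available in num_key
 *       }
 *       zend_hash_move_forward_ex(ht, &pos);
 *   }
 */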
2201 
2202 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2203 {
2204 	zval val;
2205 	zend_ulong h;
2206 	zend_string *key;
2207 
2208 	ZVAL_COPY_VALUE(&val, &p->val);
2209 	h = p->h;
2210 	key = p->key;
2211 
2212 	ZVAL_COPY_VALUE(&p->val, &q->val);
2213 	p->h = q->h;
2214 	p->key = q->key;
2215 
2216 	ZVAL_COPY_VALUE(&q->val, &val);
2217 	q->h = h;
2218 	q->key = key;
2219 }
2220 
2221 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2222 {
2223 	zval val;
2224 
2225 	ZVAL_COPY_VALUE(&val, &p->val);
2226 	ZVAL_COPY_VALUE(&p->val, &q->val);
2227 	ZVAL_COPY_VALUE(&q->val, &val);
2228 }
2229 
2230 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2231 {
2232 	zval val;
2233 	zend_ulong h;
2234 
2235 	ZVAL_COPY_VALUE(&val, &p->val);
2236 	h = p->h;
2237 
2238 	ZVAL_COPY_VALUE(&p->val, &q->val);
2239 	p->h = q->h;
2240 
2241 	ZVAL_COPY_VALUE(&q->val, &val);
2242 	q->h = h;
2243 }
2244 
2245 ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
2246 {
2247 	Bucket *p;
2248 	uint32_t i, j;
2249 
2250 	IS_CONSISTENT(ht);
2251 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
2252 
2253 	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
2254 		return SUCCESS;
2255 	}
2256 
2257 	if (HT_IS_WITHOUT_HOLES(ht)) {
2258 		i = ht->nNumUsed;
2259 	} else {
2260 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2261 			p = ht->arData + j;
2262 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2263 			if (i != j) {
2264 				ht->arData[i] = *p;
2265 			}
2266 			i++;
2267 		}
2268 	}
2269 
2270 	sort((void *)ht->arData, i, sizeof(Bucket), compar,
2271 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2272 				((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2273 
2274 	ht->nNumUsed = i;
2275 	ht->nInternalPointer = 0;
2276 
2277 	if (renumber) {
2278 		for (j = 0; j < i; j++) {
2279 			p = ht->arData + j;
2280 			p->h = j;
2281 			if (p->key) {
2282 				zend_string_release(p->key);
2283 				p->key = NULL;
2284 			}
2285 		}
2286 
2287 		ht->nNextFreeElement = i;
2288 	}
2289 	if (ht->u.flags & HASH_FLAG_PACKED) {
2290 		if (!renumber) {
2291 			zend_hash_packed_to_hash(ht);
2292 		}
2293 	} else {
2294 		if (renumber) {
2295 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2296 			Bucket *old_buckets = ht->arData;
2297 
2298 			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht->u.flags & HASH_FLAG_PERSISTENT));
2299 			ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2300 			ht->nTableMask = HT_MIN_MASK;
2301 			HT_SET_DATA_ADDR(ht, new_data);
2302 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2303 			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
2304 			HT_HASH_RESET_PACKED(ht);
2305 		} else {
2306 			zend_hash_rehash(ht);
2307 		}
2308 	}
2309 
2310 	return SUCCESS;
2311 }
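
/* Usage sketch (illustrative): the comparator receives pointers to Buckets,
 * not to zvals.  The comparator below assumes IS_LONG values and is
 * hypothetical; zend_sort is the engine's regular sort_func_t.
 *
 *   static int cmp_long_buckets(const void *a, const void *b)
 *   {
 *       const Bucket *p = (const Bucket *)a, *q = (const Bucket *)b;
 *       if (Z_LVAL(p->val) == Z_LVAL(q->val)) return 0;
 *       return Z_LVAL(p->val) < Z_LVAL(q->val) ? -1 : 1;
 *   }
 *
 *   zend_hash_sort_ex(ht, zend_sort, cmp_long_buckets, 1);  // renumber keys 0..n-1
 */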
2312 
2313 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
2314 	uint32_t idx1, idx2;
2315 
2316 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2317 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2318 	}
2319 
2320 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2321 		Bucket *p1 = ht1->arData + idx1, *p2;
2322 		zval *pData1, *pData2;
2323 		int result;
2324 
2325 		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2326 		if (ordered) {
2327 			while (1) {
2328 				ZEND_ASSERT(idx2 != ht2->nNumUsed);
2329 				p2 = ht2->arData + idx2;
2330 				if (Z_TYPE(p2->val) != IS_UNDEF) break;
2331 				idx2++;
2332 			}
2333 			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
2334 				if (p1->h != p2->h) {
2335 					return p1->h > p2->h ? 1 : -1;
2336 				}
2337 			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
2338 				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2339 					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2340 				}
2341 
2342 				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2343 				if (result != 0) {
2344 					return result;
2345 				}
2346 			} else {
2347 				/* Mixed key types: A string key is considered as larger */
2348 				return p1->key != NULL ? 1 : -1;
2349 			}
2350 			pData2 = &p2->val;
2351 			idx2++;
2352 		} else {
2353 			if (p1->key == NULL) { /* numeric index */
2354 				pData2 = zend_hash_index_find(ht2, p1->h);
2355 				if (pData2 == NULL) {
2356 					return 1;
2357 				}
2358 			} else { /* string index */
2359 				pData2 = zend_hash_find(ht2, p1->key);
2360 				if (pData2 == NULL) {
2361 					return 1;
2362 				}
2363 			}
2364 		}
2365 
2366 		pData1 = &p1->val;
2367 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2368 			pData1 = Z_INDIRECT_P(pData1);
2369 		}
2370 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2371 			pData2 = Z_INDIRECT_P(pData2);
2372 		}
2373 
2374 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
2375 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
2376 				return -1;
2377 			}
2378 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2379 			return 1;
2380 		} else {
2381 			result = compar(pData1, pData2);
2382 			if (result != 0) {
2383 				return result;
2384 			}
2385 		}
2386 	}
2387 
2388 	return 0;
2389 }
2390 
2391 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
2392 {
2393 	int result;
2394 	IS_CONSISTENT(ht1);
2395 	IS_CONSISTENT(ht2);
2396 
2397 	HASH_PROTECT_RECURSION(ht1);
2398 	HASH_PROTECT_RECURSION(ht2);
2399 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2400 	HASH_UNPROTECT_RECURSION(ht1);
2401 	HASH_UNPROTECT_RECURSION(ht2);
2402 
2403 	return result;
2404 }
2405 
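/* Usage sketch (illustrative): with ordered == 1 the element order and keys
 * must match position by position; with ordered == 0 each key of ht1 is
 * looked up in ht2.  The zval comparator below is hypothetical.
 *
 *   static int compare_zvals(const void *a, const void *b);  // compares two zval*
 *
 *   if (zend_hash_compare(ht1, ht2, compare_zvals, 0) == 0) {
 *       // both tables contain the same key/value pairs
 *   }
 */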
2406 
2407 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
2408 {
2409 	uint32_t idx;
2410 	Bucket *p, *res;
2411 
2412 	IS_CONSISTENT(ht);
2413 
2414 	if (ht->nNumOfElements == 0 ) {
2415 		return NULL;
2416 	}
2417 
2418 	idx = 0;
2419 	while (1) {
2420 		if (idx == ht->nNumUsed) {
2421 			return NULL;
2422 		}
2423 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2424 		idx++;
2425 	}
2426 	res = ht->arData + idx;
2427 	for (; idx < ht->nNumUsed; idx++) {
2428 		p = ht->arData + idx;
2429 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2430 
2431 		if (flag) {
2432 			if (compar(res, p) < 0) { /* max */
2433 				res = p;
2434 			}
2435 		} else {
2436 			if (compar(res, p) > 0) { /* min */
2437 				res = p;
2438 			}
2439 		}
2440 	}
2441 	return &res->val;
2442 }
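
/* Usage sketch (illustrative): flag != 0 selects the maximum, flag == 0 the
 * minimum.  As with zend_hash_sort_ex(), the comparator works on Buckets;
 * cmp_long_buckets is the hypothetical comparator sketched above.
 *
 *   zval *max_val = zend_hash_minmax(ht, cmp_long_buckets, 1);
 *   zval *min_val = zend_hash_minmax(ht, cmp_long_buckets, 0);
 */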
2443 
2444 ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2445 {
2446 	register const char *tmp = key;
2447 
2448 	const char *end = key + length;
2449 
2450 	if (*tmp == '-') {
2451 		tmp++;
2452 	}
2453 
2454 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
2455 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
2456 	 || (SIZEOF_ZEND_LONG == 4 &&
2457 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2458 	     *tmp > '2')) { /* overflow */
2459 		return 0;
2460 	}
2461 	*idx = (*tmp - '0');
2462 	while (1) {
2463 		++tmp;
2464 		if (tmp == end) {
2465 			if (*key == '-') {
2466 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
2467 					return 0;
2468 				}
2469 				*idx = 0 - *idx;
2470 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
2471 				return 0;
2472 			}
2473 			return 1;
2474 		}
2475 		if (*tmp <= '9' && *tmp >= '0') {
2476 			*idx = (*idx * 10) + (*tmp - '0');
2477 		} else {
2478 			return 0;
2479 		}
2480 	}
2481 }
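
/* Usage sketch (illustrative): callers normally reach this through the
 * ZEND_HANDLE_NUMERIC* helpers in zend_hash.h, which first check that the
 * key starts with a digit or '-'.
 *
 *   zend_ulong idx;
 *   if (_zend_handle_numeric_str_ex("42", 2, &idx)) {
 *       // idx == 42: the string key canonicalizes to an integer index
 *   }
 *   // keys such as "042", "4.2", or out-of-range numbers are rejected (returns 0)
 */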
2482 
2483 /*
2484  * Local variables:
2485  * tab-width: 4
2486  * c-basic-offset: 4
2487  * indent-tabs-mode: t
2488  * End:
2489  */
2490