xref: /PHP-7.0/Zend/zend_hash.c (revision b1301a06)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) 1998-2017 Zend Technologies Ltd. (http://www.zend.com) |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@zend.com>                                |
16    |          Zeev Suraski <zeev@zend.com>                                |
17    |          Dmitry Stogov <dmitry@zend.com>                             |
18    +----------------------------------------------------------------------+
19 */
20 
21 /* $Id$ */
22 
23 #include "zend.h"
24 #include "zend_globals.h"
25 #include "zend_variables.h"
26 
27 #define HT_DEBUG 0
28 #if HT_DEBUG
29 # define HT_ASSERT(c) ZEND_ASSERT(c)
30 #else
31 # define HT_ASSERT(c)
32 #endif
33 
34 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
35 
36 #if ZEND_DEBUG
37 /*
38 #define HASH_MASK_CONSISTENCY	0xc0
39 */
40 #define HT_OK					0x00
41 #define HT_IS_DESTROYING		0x40
42 #define HT_DESTROYED			0x80
43 #define HT_CLEANING				0xc0
44 
45 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
46 {
47 	if ((ht->u.flags & HASH_MASK_CONSISTENCY) == HT_OK) {
48 		return;
49 	}
50 	switch ((ht->u.flags & HASH_MASK_CONSISTENCY)) {
51 		case HT_IS_DESTROYING:
52 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
53 			break;
54 		case HT_DESTROYED:
55 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
56 			break;
57 		case HT_CLEANING:
58 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
59 			break;
60 		default:
61 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
62 			break;
63 	}
64 	zend_bailout();
65 }
66 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
67 #define SET_INCONSISTENT(n) do { \
68 		(ht)->u.flags |= n; \
69 	} while (0)
70 #else
71 #define IS_CONSISTENT(a)
72 #define SET_INCONSISTENT(n)
73 #endif
74 
75 #define HASH_PROTECT_RECURSION(ht)														\
76 	if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) {									\
77 		if (((ht)->u.flags & ZEND_HASH_APPLY_COUNT_MASK) >= (3 << 8)) {												\
78 			zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");\
79 		}																				\
80 		ZEND_HASH_INC_APPLY_COUNT(ht);													\
81 	}
82 
83 #define HASH_UNPROTECT_RECURSION(ht)													\
84 	if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) {									\
85 		ZEND_HASH_DEC_APPLY_COUNT(ht);													\
86 	}
87 
88 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
89 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
90 		zend_hash_do_resize(ht);					\
91 	}
92 
93 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
94 
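/* Round nSize up to the next power of two (e.g. 13 becomes 16), clamping the
 * result to the valid range: values below HT_MIN_SIZE become HT_MIN_SIZE and
 * values at or above HT_MAX_SIZE raise a fatal error. */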
95 static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
96 {
97 #if defined(ZEND_WIN32)
98 	unsigned long index;
99 #endif
100 
101 	/* Use a big enough power of 2 */
102 	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
103 	if (nSize < HT_MIN_SIZE) {
104 		nSize = HT_MIN_SIZE;
105 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
106 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
107 	}
108 
109 #if defined(ZEND_WIN32)
110 	if (BitScanReverse(&index, nSize - 1)) {
111 		return 0x2 << ((31 - index) ^ 0x1f);
112 	} else {
113 		/* nSize is ensured to be in the valid range, fall back to it
114 		   rather than using an undefined bit scan result. */
115 		return nSize;
116 	}
117 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
118 	return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
119 #else
120 	nSize -= 1;
121 	nSize |= (nSize >> 1);
122 	nSize |= (nSize >> 2);
123 	nSize |= (nSize >> 4);
124 	nSize |= (nSize >> 8);
125 	nSize |= (nSize >> 16);
126 	return nSize + 1;
127 #endif
128 }
129 
130 static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
131 {
132 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
133 	ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
134 	if (packed) {
135 		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
136 		(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
137 		HT_HASH_RESET_PACKED(ht);
138 	} else {
139 		(ht)->nTableMask = -(ht)->nTableSize;
140 		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
141 		(ht)->u.flags |= HASH_FLAG_INITIALIZED;
142 		if (EXPECTED(ht->nTableMask == -8)) {
143 			Bucket *arData = ht->arData;
144 
145 			HT_HASH_EX(arData, -8) = -1;
146 			HT_HASH_EX(arData, -7) = -1;
147 			HT_HASH_EX(arData, -6) = -1;
148 			HT_HASH_EX(arData, -5) = -1;
149 			HT_HASH_EX(arData, -4) = -1;
150 			HT_HASH_EX(arData, -3) = -1;
151 			HT_HASH_EX(arData, -2) = -1;
152 			HT_HASH_EX(arData, -1) = -1;
153 		} else {
154 			HT_HASH_RESET(ht);
155 		}
156 	}
157 }
158 
159 static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
160 {
161 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
162 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
163 		zend_hash_real_init_ex(ht, packed);
164 	}
165 }
166 
167 #define CHECK_INIT(ht, packed) \
168 	zend_hash_check_init(ht, packed)
169 
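/* Shared dummy hash area used by lazily initialized tables: _zend_hash_init()
 * points the data address here, so lookups on an empty table immediately find
 * HT_INVALID_IDX without any allocation. */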
170 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
171 	{HT_INVALID_IDX, HT_INVALID_IDX};
172 
173 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
174 {
175 	GC_REFCOUNT(ht) = 1;
176 	GC_TYPE_INFO(ht) = IS_ARRAY;
177 	ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
178 	ht->nTableMask = HT_MIN_MASK;
179 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
180 	ht->nNumUsed = 0;
181 	ht->nNumOfElements = 0;
182 	ht->nInternalPointer = HT_INVALID_IDX;
183 	ht->nNextFreeElement = 0;
184 	ht->pDestructor = pDestructor;
185 	ht->nTableSize = zend_hash_check_size(nSize);
186 }
187 
188 static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
189 {
190 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
191 	if (ht->nTableSize >= HT_MAX_SIZE) {
192 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
193 	}
194 	HANDLE_BLOCK_INTERRUPTIONS();
195 	ht->nTableSize += ht->nTableSize;
196 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
197 	HANDLE_UNBLOCK_INTERRUPTIONS();
198 }
199 
200 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
201 {
202 	IS_CONSISTENT(ht);
203 
204 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
205 	zend_hash_real_init_ex(ht, packed);
206 }
207 
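/* Convert a packed array (values addressed directly by integer offset, no hash
 * part in use) into a regular hash: allocate a full hash + bucket area, copy
 * the buckets over and rebuild the index with zend_hash_rehash(). */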
208 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
209 {
210 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
211 	Bucket *old_buckets = ht->arData;
212 
213 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
214 	HANDLE_BLOCK_INTERRUPTIONS();
215 	ht->u.flags &= ~HASH_FLAG_PACKED;
216 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
217 	ht->nTableMask = -ht->nTableSize;
218 	HT_SET_DATA_ADDR(ht, new_data);
219 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
220 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
221 	zend_hash_rehash(ht);
222 	HANDLE_UNBLOCK_INTERRUPTIONS();
223 }
224 
225 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
226 {
227 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
228 	Bucket *old_buckets = ht->arData;
229 
230 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
231 	HANDLE_BLOCK_INTERRUPTIONS();
232 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
233 	ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
234 	ht->nTableMask = HT_MIN_MASK;
235 	HT_SET_DATA_ADDR(ht, new_data);
236 	HT_HASH_RESET_PACKED(ht);
237 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
238 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
239 	HANDLE_UNBLOCK_INTERRUPTIONS();
240 }
241 
242 ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
243 {
244 	_zend_hash_init(ht, nSize, pDestructor, persistent ZEND_FILE_LINE_RELAY_CC);
245 	if (!bApplyProtection) {
246 		ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
247 	}
248 }
249 
250 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
251 {
252 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
253 	if (nSize == 0) return;
254 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
255 		if (nSize > ht->nTableSize) {
256 			ht->nTableSize = zend_hash_check_size(nSize);
257 		}
258 		zend_hash_check_init(ht, packed);
259 	} else {
260 		if (packed) {
261 			ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
262 			if (nSize > ht->nTableSize) {
263 				HANDLE_BLOCK_INTERRUPTIONS();
264 				ht->nTableSize = zend_hash_check_size(nSize);
265 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
266 				HANDLE_UNBLOCK_INTERRUPTIONS();
267 			}
268 		} else {
269 			ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
270 			if (nSize > ht->nTableSize) {
271 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
272 				Bucket *old_buckets = ht->arData;
273 				nSize = zend_hash_check_size(nSize);
274 				HANDLE_BLOCK_INTERRUPTIONS();
275 				new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
276 				ht->nTableSize = nSize;
277 				ht->nTableMask = -ht->nTableSize;
278 				HT_SET_DATA_ADDR(ht, new_data);
279 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
280 				pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
281 				zend_hash_rehash(ht);
282 				HANDLE_UNBLOCK_INTERRUPTIONS();
283 			}
284 		}
285 	}
286 }
287 
288 static uint32_t zend_array_recalc_elements(HashTable *ht)
289 {
290        zval *val;
291        uint32_t num = ht->nNumOfElements;
292 
293 	   ZEND_HASH_FOREACH_VAL(ht, val) {
294 		   if (Z_TYPE_P(val) == IS_INDIRECT) {
295 			   if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
296 				   num--;
297 			   }
298 		   }
299        } ZEND_HASH_FOREACH_END();
300        return num;
301 }
302 /* }}} */
303 
304 ZEND_API uint32_t zend_array_count(HashTable *ht)
305 {
306 	uint32_t num;
307 	if (UNEXPECTED(ht->u.v.flags & HASH_FLAG_HAS_EMPTY_IND)) {
308 		num = zend_array_recalc_elements(ht);
309 		if (UNEXPECTED(ht->nNumOfElements == num)) {
310 			ht->u.v.flags &= ~HASH_FLAG_HAS_EMPTY_IND;
311 		}
312 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
313 		num = zend_array_recalc_elements(ht);
314 	} else {
315 		num = zend_hash_num_elements(ht);
316 	}
317 	return num;
318 }
319 /* }}} */
320 
321 ZEND_API void ZEND_FASTCALL zend_hash_set_apply_protection(HashTable *ht, zend_bool bApplyProtection)
322 {
323 	if (bApplyProtection) {
324 		ht->u.flags |= HASH_FLAG_APPLY_PROTECTION;
325 	} else {
326 		ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
327 	}
328 }
329 
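/* Register a position for 'ht' in the global EG(ht_iterators) registry: reuse
 * a free slot if one exists, otherwise grow the registry by 8 slots (moving
 * off the fixed EG(ht_iterators_slots) buffer on first growth). The per-table
 * iterator counter saturates at 255. */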
330 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
331 {
332 	HashTableIterator *iter = EG(ht_iterators);
333 	HashTableIterator *end  = iter + EG(ht_iterators_count);
334 	uint32_t idx;
335 
336 	if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
337 		ht->u.v.nIteratorsCount++;
338 	}
339 	while (iter != end) {
340 		if (iter->ht == NULL) {
341 			iter->ht = ht;
342 			iter->pos = pos;
343 			idx = iter - EG(ht_iterators);
344 			if (idx + 1 > EG(ht_iterators_used)) {
345 				EG(ht_iterators_used) = idx + 1;
346 			}
347 			return idx;
348 		}
349 		iter++;
350 	}
351 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
352 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
353 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
354 	} else {
355 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
356 	}
357 	iter = EG(ht_iterators) + EG(ht_iterators_count);
358 	EG(ht_iterators_count) += 8;
359 	iter->ht = ht;
360 	iter->pos = pos;
361 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
362 	idx = iter - EG(ht_iterators);
363 	EG(ht_iterators_used) = idx + 1;
364 	return idx;
365 }
366 
367 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
368 {
369 	HashTableIterator *iter = EG(ht_iterators) + idx;
370 
371 	ZEND_ASSERT(idx != (uint32_t)-1);
372 	if (iter->pos == HT_INVALID_IDX) {
373 		return HT_INVALID_IDX;
374 	} else if (UNEXPECTED(iter->ht != ht)) {
375 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
376 				&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
377 			iter->ht->u.v.nIteratorsCount--;
378 		}
379 		if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
380 			ht->u.v.nIteratorsCount++;
381 		}
382 		iter->ht = ht;
383 		iter->pos = ht->nInternalPointer;
384 	}
385 	return iter->pos;
386 }
387 
388 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
389 {
390 	HashTable *ht = Z_ARRVAL_P(array);
391 	HashTableIterator *iter = EG(ht_iterators) + idx;
392 
393 	ZEND_ASSERT(idx != (uint32_t)-1);
394 	if (iter->pos == HT_INVALID_IDX) {
395 		return HT_INVALID_IDX;
396 	} else if (UNEXPECTED(iter->ht != ht)) {
397 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
398 				&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
399 			iter->ht->u.v.nIteratorsCount--;
400 		}
401 		SEPARATE_ARRAY(array);
402 		ht = Z_ARRVAL_P(array);
403 		if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
404 			ht->u.v.nIteratorsCount++;
405 		}
406 		iter->ht = ht;
407 		iter->pos = ht->nInternalPointer;
408 	}
409 	return iter->pos;
410 }
411 
412 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
413 {
414 	HashTableIterator *iter = EG(ht_iterators) + idx;
415 
416 	ZEND_ASSERT(idx != (uint32_t)-1);
417 
418 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
419 			&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
420 		iter->ht->u.v.nIteratorsCount--;
421 	}
422 	iter->ht = NULL;
423 
424 	if (idx == EG(ht_iterators_used) - 1) {
425 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
426 			idx--;
427 		}
428 		EG(ht_iterators_used) = idx;
429 	}
430 }
431 
432 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
433 {
434 	HashTableIterator *iter = EG(ht_iterators);
435 	HashTableIterator *end  = iter + EG(ht_iterators_used);
436 
437 	while (iter != end) {
438 		if (iter->ht == ht) {
439 			iter->ht = HT_POISONED_PTR;
440 		}
441 		iter++;
442 	}
443 }
444 
445 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
446 {
447 	if (UNEXPECTED(ht->u.v.nIteratorsCount)) {
448 		_zend_hash_iterators_remove(ht);
449 	}
450 }
451 
452 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
453 {
454 	HashTableIterator *iter = EG(ht_iterators);
455 	HashTableIterator *end  = iter + EG(ht_iterators_used);
456 	HashPosition res = HT_INVALID_IDX;
457 
458 	while (iter != end) {
459 		if (iter->ht == ht) {
460 			if (iter->pos >= start && iter->pos < res) {
461 				res = iter->pos;
462 			}
463 		}
464 		iter++;
465 	}
466 	return res;
467 }
468 
469 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
470 {
471 	HashTableIterator *iter = EG(ht_iterators);
472 	HashTableIterator *end  = iter + EG(ht_iterators_used);
473 
474 	while (iter != end) {
475 		if (iter->ht == ht && iter->pos == from) {
476 			iter->pos = to;
477 		}
478 		iter++;
479 	}
480 }
481 
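/* Look up a string key: h | nTableMask selects a slot in the hash part (the
 * mask is negative, so the slot lies before arData), then the collision chain
 * is walked through Z_NEXT(p->val). Interned keys are matched by pointer,
 * other keys by hash value, length and memcmp(). */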
482 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key)
483 {
484 	zend_ulong h;
485 	uint32_t nIndex;
486 	uint32_t idx;
487 	Bucket *p, *arData;
488 
489 	h = zend_string_hash_val(key);
490 	arData = ht->arData;
491 	nIndex = h | ht->nTableMask;
492 	idx = HT_HASH_EX(arData, nIndex);
493 	while (EXPECTED(idx != HT_INVALID_IDX)) {
494 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
495 		if (EXPECTED(p->key == key)) { /* check for the same interned string */
496 			return p;
497 		} else if (EXPECTED(p->h == h) &&
498 		     EXPECTED(p->key) &&
499 		     EXPECTED(ZSTR_LEN(p->key) == ZSTR_LEN(key)) &&
500 		     EXPECTED(memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
501 			return p;
502 		}
503 		idx = Z_NEXT(p->val);
504 	}
505 	return NULL;
506 }
507 
508 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
509 {
510 	uint32_t nIndex;
511 	uint32_t idx;
512 	Bucket *p, *arData;
513 
514 	arData = ht->arData;
515 	nIndex = h | ht->nTableMask;
516 	idx = HT_HASH_EX(arData, nIndex);
517 	while (idx != HT_INVALID_IDX) {
518 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
519 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
520 		if ((p->h == h)
521 			 && p->key
522 			 && (ZSTR_LEN(p->key) == len)
523 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
524 			return p;
525 		}
526 		idx = Z_NEXT(p->val);
527 	}
528 	return NULL;
529 }
530 
531 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
532 {
533 	uint32_t nIndex;
534 	uint32_t idx;
535 	Bucket *p, *arData;
536 
537 	arData = ht->arData;
538 	nIndex = h | ht->nTableMask;
539 	idx = HT_HASH_EX(arData, nIndex);
540 	while (idx != HT_INVALID_IDX) {
541 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
542 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
543 		if (p->h == h && !p->key) {
544 			return p;
545 		}
546 		idx = Z_NEXT(p->val);
547 	}
548 	return NULL;
549 }
550 
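/* Common insert/update path for string keys. HASH_ADD returns NULL when the
 * key already holds a value, HASH_UPDATE replaces it (running the destructor),
 * HASH_UPDATE_INDIRECT follows IS_INDIRECT slots, and HASH_ADD_NEW skips the
 * duplicate-key lookup entirely. */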
551 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
552 {
553 	zend_ulong h;
554 	uint32_t nIndex;
555 	uint32_t idx;
556 	Bucket *p;
557 
558 	IS_CONSISTENT(ht);
559 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
560 
561 	if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
562 		CHECK_INIT(ht, 0);
563 		goto add_to_hash;
564 	} else if (ht->u.flags & HASH_FLAG_PACKED) {
565 		zend_hash_packed_to_hash(ht);
566 	} else if ((flag & HASH_ADD_NEW) == 0) {
567 		p = zend_hash_find_bucket(ht, key);
568 
569 		if (p) {
570 			zval *data;
571 
572 			if (flag & HASH_ADD) {
573 				if (!(flag & HASH_UPDATE_INDIRECT)) {
574 					return NULL;
575 				}
576 				ZEND_ASSERT(&p->val != pData);
577 				data = &p->val;
578 				if (Z_TYPE_P(data) == IS_INDIRECT) {
579 					data = Z_INDIRECT_P(data);
580 					if (Z_TYPE_P(data) != IS_UNDEF) {
581 						return NULL;
582 					}
583 				} else {
584 					return NULL;
585 				}
586 			} else {
587 				ZEND_ASSERT(&p->val != pData);
588 				data = &p->val;
589 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
590 					data = Z_INDIRECT_P(data);
591 				}
592 			}
593 			HANDLE_BLOCK_INTERRUPTIONS();
594 			if (ht->pDestructor) {
595 				ht->pDestructor(data);
596 			}
597 			ZVAL_COPY_VALUE(data, pData);
598 			HANDLE_UNBLOCK_INTERRUPTIONS();
599 			return data;
600 		}
601 	}
602 
603 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
604 
605 add_to_hash:
606 	HANDLE_BLOCK_INTERRUPTIONS();
607 	idx = ht->nNumUsed++;
608 	ht->nNumOfElements++;
609 	if (ht->nInternalPointer == HT_INVALID_IDX) {
610 		ht->nInternalPointer = idx;
611 	}
612 	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
613 	p = ht->arData + idx;
614 	p->key = key;
615 	if (!ZSTR_IS_INTERNED(key)) {
616 		zend_string_addref(key);
617 		ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
618 		zend_string_hash_val(key);
619 	}
620 	p->h = h = ZSTR_H(key);
621 	ZVAL_COPY_VALUE(&p->val, pData);
622 	nIndex = h | ht->nTableMask;
623 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
624 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
625 	HANDLE_UNBLOCK_INTERRUPTIONS();
626 
627 	return &p->val;
628 }
629 
630 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
631 {
632 	return _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
633 }
634 
635 ZEND_API zval* ZEND_FASTCALL _zend_hash_add(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
636 {
637 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
638 }
639 
640 ZEND_API zval* ZEND_FASTCALL _zend_hash_update(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
641 {
642 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
643 }
644 
645 ZEND_API zval* ZEND_FASTCALL _zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
646 {
647 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
648 }
649 
650 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
651 {
652 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
653 }
654 
655 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
656 {
657 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
658 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
659 	zend_string_release(key);
660 	return ret;
661 }
662 
663 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
664 {
665 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
666 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
667 	zend_string_release(key);
668 	return ret;
669 }
670 
671 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
672 {
673 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
674 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
675 	zend_string_release(key);
676 	return ret;
677 }
678 
679 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
680 {
681 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
682 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
683 	zend_string_release(key);
684 	return ret;
685 }
686 
687 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
688 {
689 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
690 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
691 	zend_string_delref(key);
692 	return ret;
693 }
694 
695 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
696 {
697 	zval dummy;
698 
699 	ZVAL_NULL(&dummy);
700 	return zend_hash_index_add(ht, h, &dummy);
701 }
702 
703 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
704 {
705 	zval dummy;
706 
707 	ZVAL_NULL(&dummy);
708 	return zend_hash_add(ht, key, &dummy);
709 }
710 
711 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
712 {
713 	zval dummy;
714 
715 	ZVAL_NULL(&dummy);
716 	return zend_hash_str_add(ht, str, len, &dummy);
717 }
718 
719 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
720 {
721 	uint32_t nIndex;
722 	uint32_t idx;
723 	Bucket *p;
724 
725 	IS_CONSISTENT(ht);
726 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
727 
728 	if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
729 		CHECK_INIT(ht, h < ht->nTableSize);
730 		if (h < ht->nTableSize) {
731 			p = ht->arData + h;
732 			goto add_to_packed;
733 		}
734 		goto add_to_hash;
735 	} else if (ht->u.flags & HASH_FLAG_PACKED) {
736 		if (h < ht->nNumUsed) {
737 			p = ht->arData + h;
738 			if (Z_TYPE(p->val) != IS_UNDEF) {
739 				if (flag & HASH_ADD) {
740 					return NULL;
741 				}
742 				if (ht->pDestructor) {
743 					ht->pDestructor(&p->val);
744 				}
745 				ZVAL_COPY_VALUE(&p->val, pData);
746 				if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
747 					ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
748 				}
749 				return &p->val;
750 			} else { /* we have to keep the order :( */
751 				goto convert_to_hash;
752 			}
753 		} else if (EXPECTED(h < ht->nTableSize)) {
754 			p = ht->arData + h;
755 		} else if ((h >> 1) < ht->nTableSize &&
756 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
757 			zend_hash_packed_grow(ht);
758 			p = ht->arData + h;
759 		} else {
760 			goto convert_to_hash;
761 		}
762 
763 add_to_packed:
764 		HANDLE_BLOCK_INTERRUPTIONS();
765 		/* incremental initialization of empty Buckets */
766 		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
767 			ht->nNumUsed = h + 1;
768 		} else if (h >= ht->nNumUsed) {
769 			if (h > ht->nNumUsed) {
770 				Bucket *q = ht->arData + ht->nNumUsed;
771 				while (q != p) {
772 					ZVAL_UNDEF(&q->val);
773 					q++;
774 				}
775 			}
776 			ht->nNumUsed = h + 1;
777 		}
778 		ht->nNumOfElements++;
779 		if (ht->nInternalPointer == HT_INVALID_IDX) {
780 			ht->nInternalPointer = h;
781 		}
782 		zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
783 		if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
784 			ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
785 		}
786 		p->h = h;
787 		p->key = NULL;
788 		ZVAL_COPY_VALUE(&p->val, pData);
789 
790 		HANDLE_UNBLOCK_INTERRUPTIONS();
791 
792 		return &p->val;
793 
794 convert_to_hash:
795 		zend_hash_packed_to_hash(ht);
796 	} else if ((flag & HASH_ADD_NEW) == 0) {
797 		p = zend_hash_index_find_bucket(ht, h);
798 		if (p) {
799 			if (flag & HASH_ADD) {
800 				return NULL;
801 			}
802 			ZEND_ASSERT(&p->val != pData);
803 			HANDLE_BLOCK_INTERRUPTIONS();
804 			if (ht->pDestructor) {
805 				ht->pDestructor(&p->val);
806 			}
807 			ZVAL_COPY_VALUE(&p->val, pData);
808 			HANDLE_UNBLOCK_INTERRUPTIONS();
809 			if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
810 				ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
811 			}
812 			return &p->val;
813 		}
814 	}
815 
816 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
817 
818 add_to_hash:
819 	HANDLE_BLOCK_INTERRUPTIONS();
820 	idx = ht->nNumUsed++;
821 	ht->nNumOfElements++;
822 	if (ht->nInternalPointer == HT_INVALID_IDX) {
823 		ht->nInternalPointer = idx;
824 	}
825 	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
826 	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
827 		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
828 	}
829 	p = ht->arData + idx;
830 	p->h = h;
831 	p->key = NULL;
832 	nIndex = h | ht->nTableMask;
833 	ZVAL_COPY_VALUE(&p->val, pData);
834 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
835 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
836 	HANDLE_UNBLOCK_INTERRUPTIONS();
837 
838 	return &p->val;
839 }
840 
841 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
842 {
843 	return _zend_hash_index_add_or_update_i(ht, h, pData, flag ZEND_FILE_LINE_RELAY_CC);
844 }
845 
846 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
847 {
848 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
849 }
850 
851 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
852 {
853 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
854 }
855 
856 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
857 {
858 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
859 }
860 
861 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
862 {
863 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
864 }
865 
866 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
867 {
868 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
869 }
870 
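/* Called when arData has no free bucket left: if deleted entries make up more
 * than 1/32 of the live elements, compact in place via zend_hash_rehash();
 * otherwise double nTableSize, failing with E_ERROR once HT_MAX_SIZE is hit. */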
871 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
872 {
873 
874 	IS_CONSISTENT(ht);
875 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
876 
877 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
878 		HANDLE_BLOCK_INTERRUPTIONS();
879 		zend_hash_rehash(ht);
880 		HANDLE_UNBLOCK_INTERRUPTIONS();
881 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
882 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
883 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
884 		Bucket *old_buckets = ht->arData;
885 
886 		HANDLE_BLOCK_INTERRUPTIONS();
887 		new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
888 		ht->nTableSize = nSize;
889 		ht->nTableMask = -ht->nTableSize;
890 		HT_SET_DATA_ADDR(ht, new_data);
891 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
892 		pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
893 		zend_hash_rehash(ht);
894 		HANDLE_UNBLOCK_INTERRUPTIONS();
895 	} else {
896 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
897 	}
898 }
899 
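/* Rebuild the hash part from the bucket array. When holes (IS_UNDEF buckets)
 * are present, live buckets are compacted to the front and the internal
 * pointer and any active iterators are moved to the new positions. */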
900 ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
901 {
902 	Bucket *p;
903 	uint32_t nIndex, i;
904 
905 	IS_CONSISTENT(ht);
906 
907 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
908 		if (ht->u.flags & HASH_FLAG_INITIALIZED) {
909 			ht->nNumUsed = 0;
910 			HT_HASH_RESET(ht);
911 		}
912 		return SUCCESS;
913 	}
914 
915 	HT_HASH_RESET(ht);
916 	i = 0;
917 	p = ht->arData;
918 	if (ht->nNumUsed == ht->nNumOfElements) {
919 		do {
920 			nIndex = p->h | ht->nTableMask;
921 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
922 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
923 			p++;
924 		} while (++i < ht->nNumUsed);
925 	} else {
926 		do {
927 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
928 				uint32_t j = i;
929 				Bucket *q = p;
930 
931 				if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
932 					while (++i < ht->nNumUsed) {
933 						p++;
934 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
935 							ZVAL_COPY_VALUE(&q->val, &p->val);
936 							q->h = p->h;
937 							nIndex = q->h | ht->nTableMask;
938 							q->key = p->key;
939 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
940 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
941 							if (UNEXPECTED(ht->nInternalPointer == i)) {
942 								ht->nInternalPointer = j;
943 							}
944 							q++;
945 							j++;
946 						}
947 					}
948 				} else {
949 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
950 
951 					while (++i < ht->nNumUsed) {
952 						p++;
953 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
954 							ZVAL_COPY_VALUE(&q->val, &p->val);
955 							q->h = p->h;
956 							nIndex = q->h | ht->nTableMask;
957 							q->key = p->key;
958 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
959 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
960 							if (UNEXPECTED(ht->nInternalPointer == i)) {
961 								ht->nInternalPointer = j;
962 							}
963 							if (UNEXPECTED(i == iter_pos)) {
964 								zend_hash_iterators_update(ht, i, j);
965 								iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
966 							}
967 							q++;
968 							j++;
969 						}
970 					}
971 				}
972 				ht->nNumUsed = j;
973 				break;
974 			}
975 			nIndex = p->h | ht->nTableMask;
976 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
977 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
978 			p++;
979 		} while (++i < ht->nNumUsed);
980 	}
981 	return SUCCESS;
982 }
983 
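/* Core deletion helper: unlink the bucket from its collision chain (unless the
 * table is packed), shrink nNumUsed past trailing holes when the last used
 * bucket is removed, move the internal pointer and iterators to the next live
 * slot, release the key, and invoke the destructor on a copy of the value so
 * the bucket is already IS_UNDEF during the call. */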
984 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
985 {
986 	HANDLE_BLOCK_INTERRUPTIONS();
987 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
988 		if (prev) {
989 			Z_NEXT(prev->val) = Z_NEXT(p->val);
990 		} else {
991 			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
992 		}
993 	}
994 	if (HT_IDX_TO_HASH(ht->nNumUsed - 1) == idx) {
995 		do {
996 			ht->nNumUsed--;
997 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
998 	}
999 	ht->nNumOfElements--;
1000 	if (HT_IDX_TO_HASH(ht->nInternalPointer) == idx || UNEXPECTED(ht->u.v.nIteratorsCount)) {
1001 		uint32_t new_idx;
1002 
1003 		new_idx = idx = HT_HASH_TO_IDX(idx);
1004 		while (1) {
1005 			new_idx++;
1006 			if (new_idx >= ht->nNumUsed) {
1007 				new_idx = HT_INVALID_IDX;
1008 				break;
1009 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1010 				break;
1011 			}
1012 		}
1013 		if (ht->nInternalPointer == idx) {
1014 			ht->nInternalPointer = new_idx;
1015 		}
1016 		zend_hash_iterators_update(ht, idx, new_idx);
1017 	}
1018 	if (p->key) {
1019 		zend_string_release(p->key);
1020 	}
1021 	if (ht->pDestructor) {
1022 		zval tmp;
1023 		ZVAL_COPY_VALUE(&tmp, &p->val);
1024 		ZVAL_UNDEF(&p->val);
1025 		ht->pDestructor(&tmp);
1026 	} else {
1027 		ZVAL_UNDEF(&p->val);
1028 	}
1029 	HANDLE_UNBLOCK_INTERRUPTIONS();
1030 }
1031 
1032 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1033 {
1034 	Bucket *prev = NULL;
1035 
1036 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1037 		uint32_t nIndex = p->h | ht->nTableMask;
1038 		uint32_t i = HT_HASH(ht, nIndex);
1039 
1040 		if (i != idx) {
1041 			prev = HT_HASH_TO_BUCKET(ht, i);
1042 			while (Z_NEXT(prev->val) != idx) {
1043 				i = Z_NEXT(prev->val);
1044 				prev = HT_HASH_TO_BUCKET(ht, i);
1045 			}
1046 	 	}
1047 	}
1048 
1049 	_zend_hash_del_el_ex(ht, idx, p, prev);
1050 }
1051 
1052 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1053 {
1054 	IS_CONSISTENT(ht);
1055 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1056 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1057 }
1058 
1059 ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1060 {
1061 	zend_ulong h;
1062 	uint32_t nIndex;
1063 	uint32_t idx;
1064 	Bucket *p;
1065 	Bucket *prev = NULL;
1066 
1067 	IS_CONSISTENT(ht);
1068 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1069 
1070 	h = zend_string_hash_val(key);
1071 	nIndex = h | ht->nTableMask;
1072 
1073 	idx = HT_HASH(ht, nIndex);
1074 	while (idx != HT_INVALID_IDX) {
1075 		p = HT_HASH_TO_BUCKET(ht, idx);
1076 		if ((p->key == key) ||
1077 			(p->h == h &&
1078 		     p->key &&
1079 		     ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1080 		     memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1081 			_zend_hash_del_el_ex(ht, idx, p, prev);
1082 			return SUCCESS;
1083 		}
1084 		prev = p;
1085 		idx = Z_NEXT(p->val);
1086 	}
1087 	return FAILURE;
1088 }
1089 
1090 ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1091 {
1092 	zend_ulong h;
1093 	uint32_t nIndex;
1094 	uint32_t idx;
1095 	Bucket *p;
1096 	Bucket *prev = NULL;
1097 
1098 	IS_CONSISTENT(ht);
1099 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1100 
1101 	h = zend_string_hash_val(key);
1102 	nIndex = h | ht->nTableMask;
1103 
1104 	idx = HT_HASH(ht, nIndex);
1105 	while (idx != HT_INVALID_IDX) {
1106 		p = HT_HASH_TO_BUCKET(ht, idx);
1107 		if ((p->key == key) ||
1108 			(p->h == h &&
1109 		     p->key &&
1110 		     ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1111 		     memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1112 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1113 				zval *data = Z_INDIRECT(p->val);
1114 
1115 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1116 					return FAILURE;
1117 				} else {
1118 					if (ht->pDestructor) {
1119 						zval tmp;
1120 						ZVAL_COPY_VALUE(&tmp, data);
1121 						ZVAL_UNDEF(data);
1122 						ht->pDestructor(&tmp);
1123 					} else {
1124 						ZVAL_UNDEF(data);
1125 					}
1126 					ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1127 				}
1128 			} else {
1129 				_zend_hash_del_el_ex(ht, idx, p, prev);
1130 			}
1131 			return SUCCESS;
1132 		}
1133 		prev = p;
1134 		idx = Z_NEXT(p->val);
1135 	}
1136 	return FAILURE;
1137 }
1138 
1139 ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1140 {
1141 	zend_ulong h;
1142 	uint32_t nIndex;
1143 	uint32_t idx;
1144 	Bucket *p;
1145 	Bucket *prev = NULL;
1146 
1147 	IS_CONSISTENT(ht);
1148 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1149 
1150 	h = zend_inline_hash_func(str, len);
1151 	nIndex = h | ht->nTableMask;
1152 
1153 	idx = HT_HASH(ht, nIndex);
1154 	while (idx != HT_INVALID_IDX) {
1155 		p = HT_HASH_TO_BUCKET(ht, idx);
1156 		if ((p->h == h)
1157 			 && p->key
1158 			 && (ZSTR_LEN(p->key) == len)
1159 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1160 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1161 				zval *data = Z_INDIRECT(p->val);
1162 
1163 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1164 					return FAILURE;
1165 				} else {
1166 					if (ht->pDestructor) {
1167 						ht->pDestructor(data);
1168 					}
1169 					ZVAL_UNDEF(data);
1170 					ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1171 				}
1172 			} else {
1173 				_zend_hash_del_el_ex(ht, idx, p, prev);
1174 			}
1175 			return SUCCESS;
1176 		}
1177 		prev = p;
1178 		idx = Z_NEXT(p->val);
1179 	}
1180 	return FAILURE;
1181 }
1182 
1183 ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1184 {
1185 	zend_ulong h;
1186 	uint32_t nIndex;
1187 	uint32_t idx;
1188 	Bucket *p;
1189 	Bucket *prev = NULL;
1190 
1191 	IS_CONSISTENT(ht);
1192 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1193 
1194 	h = zend_inline_hash_func(str, len);
1195 	nIndex = h | ht->nTableMask;
1196 
1197 	idx = HT_HASH(ht, nIndex);
1198 	while (idx != HT_INVALID_IDX) {
1199 		p = HT_HASH_TO_BUCKET(ht, idx);
1200 		if ((p->h == h)
1201 			 && p->key
1202 			 && (ZSTR_LEN(p->key) == len)
1203 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1204 			_zend_hash_del_el_ex(ht, idx, p, prev);
1205 			return SUCCESS;
1206 		}
1207 		prev = p;
1208 		idx = Z_NEXT(p->val);
1209 	}
1210 	return FAILURE;
1211 }
1212 
1213 ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1214 {
1215 	uint32_t nIndex;
1216 	uint32_t idx;
1217 	Bucket *p;
1218 	Bucket *prev = NULL;
1219 
1220 	IS_CONSISTENT(ht);
1221 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1222 
1223 	if (ht->u.flags & HASH_FLAG_PACKED) {
1224 		if (h < ht->nNumUsed) {
1225 			p = ht->arData + h;
1226 			if (Z_TYPE(p->val) != IS_UNDEF) {
1227 				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1228 				return SUCCESS;
1229 			}
1230 		}
1231 		return FAILURE;
1232 	}
1233 	nIndex = h | ht->nTableMask;
1234 
1235 	idx = HT_HASH(ht, nIndex);
1236 	while (idx != HT_INVALID_IDX) {
1237 		p = HT_HASH_TO_BUCKET(ht, idx);
1238 		if ((p->h == h) && (p->key == NULL)) {
1239 			_zend_hash_del_el_ex(ht, idx, p, prev);
1240 			return SUCCESS;
1241 		}
1242 		prev = p;
1243 		idx = Z_NEXT(p->val);
1244 	}
1245 	return FAILURE;
1246 }
1247 
1248 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1249 {
1250 	Bucket *p, *end;
1251 
1252 	IS_CONSISTENT(ht);
1253 	HT_ASSERT(GC_REFCOUNT(ht) <= 1);
1254 
1255 	if (ht->nNumUsed) {
1256 		p = ht->arData;
1257 		end = p + ht->nNumUsed;
1258 		if (ht->pDestructor) {
1259 			SET_INCONSISTENT(HT_IS_DESTROYING);
1260 
1261 			if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
1262 				if (ht->nNumUsed == ht->nNumOfElements) {
1263 					do {
1264 						ht->pDestructor(&p->val);
1265 					} while (++p != end);
1266 				} else {
1267 					do {
1268 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1269 							ht->pDestructor(&p->val);
1270 						}
1271 					} while (++p != end);
1272 				}
1273 			} else if (ht->nNumUsed == ht->nNumOfElements) {
1274 				do {
1275 					ht->pDestructor(&p->val);
1276 					if (EXPECTED(p->key)) {
1277 						zend_string_release(p->key);
1278 					}
1279 				} while (++p != end);
1280 			} else {
1281 				do {
1282 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1283 						ht->pDestructor(&p->val);
1284 						if (EXPECTED(p->key)) {
1285 							zend_string_release(p->key);
1286 						}
1287 					}
1288 				} while (++p != end);
1289 			}
1290 
1291 			SET_INCONSISTENT(HT_DESTROYED);
1292 		} else {
1293 			if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
1294 				do {
1295 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1296 						if (EXPECTED(p->key)) {
1297 							zend_string_release(p->key);
1298 						}
1299 					}
1300 				} while (++p != end);
1301 			}
1302 		}
1303 		zend_hash_iterators_remove(ht);
1304 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1305 		return;
1306 	}
1307 	pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1308 }
1309 
1310 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1311 {
1312 	Bucket *p, *end;
1313 
1314 	IS_CONSISTENT(ht);
1315 	HT_ASSERT(GC_REFCOUNT(ht) <= 1);
1316 
1317 	/* break possible cycles */
1318 	GC_REMOVE_FROM_BUFFER(ht);
1319 	GC_TYPE_INFO(ht) = IS_NULL | (GC_WHITE << 16);
1320 
1321 	if (ht->nNumUsed) {
1322 		/* In some rare cases destructors of regular arrays may be changed */
1323 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1324 			zend_hash_destroy(ht);
1325 			goto free_ht;
1326 		}
1327 
1328 		p = ht->arData;
1329 		end = p + ht->nNumUsed;
1330 		SET_INCONSISTENT(HT_IS_DESTROYING);
1331 
1332 		if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
1333 			do {
1334 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1335 			} while (++p != end);
1336 		} else if (ht->nNumUsed == ht->nNumOfElements) {
1337 			do {
1338 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1339 				if (EXPECTED(p->key)) {
1340 					zend_string_release(p->key);
1341 				}
1342 			} while (++p != end);
1343 		} else {
1344 			do {
1345 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1346 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1347 					if (EXPECTED(p->key)) {
1348 						zend_string_release(p->key);
1349 					}
1350 				}
1351 			} while (++p != end);
1352 		}
1353 		zend_hash_iterators_remove(ht);
1354 		SET_INCONSISTENT(HT_DESTROYED);
1355 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1356 		goto free_ht;
1357 	}
1358 	efree(HT_GET_DATA_ADDR(ht));
1359 free_ht:
1360 	FREE_HASHTABLE(ht);
1361 }
1362 
1363 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1364 {
1365 	Bucket *p, *end;
1366 
1367 	IS_CONSISTENT(ht);
1368 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1369 
1370 	if (ht->nNumUsed) {
1371 		p = ht->arData;
1372 		end = p + ht->nNumUsed;
1373 		if (ht->pDestructor) {
1374 			if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
1375 				if (ht->nNumUsed == ht->nNumOfElements) {
1376 					do {
1377 						ht->pDestructor(&p->val);
1378 					} while (++p != end);
1379 				} else {
1380 					do {
1381 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1382 							ht->pDestructor(&p->val);
1383 						}
1384 					} while (++p != end);
1385 				}
1386 			} else if (ht->nNumUsed == ht->nNumOfElements) {
1387 				do {
1388 					ht->pDestructor(&p->val);
1389 					if (EXPECTED(p->key)) {
1390 						zend_string_release(p->key);
1391 					}
1392 				} while (++p != end);
1393 			} else {
1394 				do {
1395 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1396 						ht->pDestructor(&p->val);
1397 						if (EXPECTED(p->key)) {
1398 							zend_string_release(p->key);
1399 						}
1400 					}
1401 				} while (++p != end);
1402 			}
1403 		} else {
1404 			if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
1405 				if (ht->nNumUsed == ht->nNumOfElements) {
1406 					do {
1407 						if (EXPECTED(p->key)) {
1408 							zend_string_release(p->key);
1409 						}
1410 					} while (++p != end);
1411 				} else {
1412 					do {
1413 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1414 							if (EXPECTED(p->key)) {
1415 								zend_string_release(p->key);
1416 							}
1417 						}
1418 					} while (++p != end);
1419 				}
1420 			}
1421 		}
1422 		if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1423 			HT_HASH_RESET(ht);
1424 		}
1425 	}
1426 	ht->nNumUsed = 0;
1427 	ht->nNumOfElements = 0;
1428 	ht->nNextFreeElement = 0;
1429 	ht->nInternalPointer = HT_INVALID_IDX;
1430 }
1431 
1432 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1433 {
1434 	Bucket *p, *end;
1435 
1436 	IS_CONSISTENT(ht);
1437 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1438 
1439 	if (ht->nNumUsed) {
1440 		p = ht->arData;
1441 		end = p + ht->nNumUsed;
1442 		if (ht->u.flags & HASH_FLAG_STATIC_KEYS) {
1443 			do {
1444 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1445 			} while (++p != end);
1446 		} else if (ht->nNumUsed == ht->nNumOfElements) {
1447 			do {
1448 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1449 				if (EXPECTED(p->key)) {
1450 					zend_string_release(p->key);
1451 				}
1452 			} while (++p != end);
1453 		} else {
1454 			do {
1455 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1456 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1457 					if (EXPECTED(p->key)) {
1458 						zend_string_release(p->key);
1459 					}
1460 				}
1461 			} while (++p != end);
1462 		}
1463 		HT_HASH_RESET(ht);
1464 	}
1465 	ht->nNumUsed = 0;
1466 	ht->nNumOfElements = 0;
1467 	ht->nNextFreeElement = 0;
1468 	ht->nInternalPointer = HT_INVALID_IDX;
1469 }
1470 
1471 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1472 {
1473 	uint32_t idx;
1474 	Bucket *p;
1475 
1476 	IS_CONSISTENT(ht);
1477 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1478 
1479 	p = ht->arData;
1480 	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1481 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1482 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1483 	}
1484 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1485 		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1486 	}
1487 
1488 	SET_INCONSISTENT(HT_DESTROYED);
1489 }
1490 
1491 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1492 {
1493 	uint32_t idx;
1494 	Bucket *p;
1495 
1496 	IS_CONSISTENT(ht);
1497 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1498 
1499 	idx = ht->nNumUsed;
1500 	p = ht->arData + ht->nNumUsed;
1501 	while (idx > 0) {
1502 		idx--;
1503 		p--;
1504 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1505 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1506 	}
1507 
1508 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1509 		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1510 	}
1511 
1512 	SET_INCONSISTENT(HT_DESTROYED);
1513 }
1514 
1515 /* This is used to iterate over the elements of a hashtable and selectively
1516  * delete certain entries. apply_func() receives the data and decides if the
1517  * entry should be deleted or if iteration should be stopped. The following
1518  * three return codes are possible:
1519  * ZEND_HASH_APPLY_KEEP   - continue
1520  * ZEND_HASH_APPLY_STOP   - stop iteration
1521  * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
1522  */
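/*
 * Illustrative (hypothetical) apply_func_t callback, shown as a usage sketch
 * only: drop NULL entries and stop once a long value greater than 100 is seen.
 *
 *   static int example_prune_cb(zval *val)
 *   {
 *       if (Z_TYPE_P(val) == IS_NULL) {
 *           return ZEND_HASH_APPLY_REMOVE;
 *       }
 *       if (Z_TYPE_P(val) == IS_LONG && Z_LVAL_P(val) > 100) {
 *           return ZEND_HASH_APPLY_STOP;
 *       }
 *       return ZEND_HASH_APPLY_KEEP;
 *   }
 *
 *   zend_hash_apply(ht, example_prune_cb);
 */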
1523 
1524 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1525 {
1526 	uint32_t idx;
1527 	Bucket *p;
1528 	int result;
1529 
1530 	IS_CONSISTENT(ht);
1531 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1532 
1533 	HASH_PROTECT_RECURSION(ht);
1534 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1535 		p = ht->arData + idx;
1536 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1537 		result = apply_func(&p->val);
1538 
1539 		if (result & ZEND_HASH_APPLY_REMOVE) {
1540 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1541 		}
1542 		if (result & ZEND_HASH_APPLY_STOP) {
1543 			break;
1544 		}
1545 	}
1546 	HASH_UNPROTECT_RECURSION(ht);
1547 }
1548 
1549 
1550 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1551 {
1552     uint32_t idx;
1553 	Bucket *p;
1554 	int result;
1555 
1556 	IS_CONSISTENT(ht);
1557 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1558 
1559 	HASH_PROTECT_RECURSION(ht);
1560 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1561 		p = ht->arData + idx;
1562 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1563 		result = apply_func(&p->val, argument);
1564 
1565 		if (result & ZEND_HASH_APPLY_REMOVE) {
1566 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1567 		}
1568 		if (result & ZEND_HASH_APPLY_STOP) {
1569 			break;
1570 		}
1571 	}
1572 	HASH_UNPROTECT_RECURSION(ht);
1573 }
1574 
1575 
1576 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1577 {
1578 	uint32_t idx;
1579 	Bucket *p;
1580 	va_list args;
1581 	zend_hash_key hash_key;
1582 	int result;
1583 
1584 	IS_CONSISTENT(ht);
1585 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1586 
1587 	HASH_PROTECT_RECURSION(ht);
1588 
1589 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1590 		p = ht->arData + idx;
1591 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1592 		va_start(args, num_args);
1593 		hash_key.h = p->h;
1594 		hash_key.key = p->key;
1595 
1596 		result = apply_func(&p->val, num_args, args, &hash_key);
1597 
1598 		if (result & ZEND_HASH_APPLY_REMOVE) {
1599 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1600 		}
1601 		if (result & ZEND_HASH_APPLY_STOP) {
1602 			va_end(args);
1603 			break;
1604 		}
1605 		va_end(args);
1606 	}
1607 
1608 	HASH_UNPROTECT_RECURSION(ht);
1609 }
1610 
1611 
1612 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1613 {
1614 	uint32_t idx;
1615 	Bucket *p;
1616 	int result;
1617 
1618 	IS_CONSISTENT(ht);
1619 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
1620 
1621 	HASH_PROTECT_RECURSION(ht);
1622 	idx = ht->nNumUsed;
1623 	while (idx > 0) {
1624 		idx--;
1625 		p = ht->arData + idx;
1626 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1627 
1628 		result = apply_func(&p->val);
1629 
1630 		if (result & ZEND_HASH_APPLY_REMOVE) {
1631 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1632 		}
1633 		if (result & ZEND_HASH_APPLY_STOP) {
1634 			break;
1635 		}
1636 	}
1637 	HASH_UNPROTECT_RECURSION(ht);
1638 }
1639 
1640 
1641 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1642 {
1643     uint32_t idx;
1644 	Bucket *p;
1645 	zval *new_entry, *data;
1646 	zend_bool setTargetPointer;
1647 
1648 	IS_CONSISTENT(source);
1649 	IS_CONSISTENT(target);
1650 	HT_ASSERT(GC_REFCOUNT(target) == 1);
1651 
1652 	setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
1653 	for (idx = 0; idx < source->nNumUsed; idx++) {
1654 		p = source->arData + idx;
1655 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1656 
1657 		if (setTargetPointer && source->nInternalPointer == idx) {
1658 			target->nInternalPointer = HT_INVALID_IDX;
1659 		}
1660 		/* INDIRECT element may point to UNDEF-ined slots */
1661 		data = &p->val;
1662 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1663 			data = Z_INDIRECT_P(data);
1664 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1665 				continue;
1666 			}
1667 		}
1668 		if (p->key) {
1669 			new_entry = zend_hash_update(target, p->key, data);
1670 		} else {
1671 			new_entry = zend_hash_index_update(target, p->h, data);
1672 		}
1673 		if (pCopyConstructor) {
1674 			pCopyConstructor(new_entry);
1675 		}
1676 	}
1677 	if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
1678 		idx = 0;
1679 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1680 			idx++;
1681 		}
1682 		target->nInternalPointer = idx;
1683 	}
1684 }
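/*
 * Illustrative sketch (not part of the original file): copying one table into a
 * freshly initialized one while bumping the refcount of each copied zval.
 * Casting zval_add_ref to copy_ctor_func_t is assumed to be the usual engine
 * pattern; the function and table names are assumptions for the example.
 */
#if 0
static void example_copy(HashTable *source)
{
	HashTable *target;

	ALLOC_HASHTABLE(target);
	zend_hash_init(target, zend_hash_num_elements(source), NULL, ZVAL_PTR_DTOR, 0);

	/* each inserted entry is passed to the copy ctor, which addrefs it */
	zend_hash_copy(target, source, (copy_ctor_func_t) zval_add_ref);

	zend_hash_destroy(target);
	FREE_HASHTABLE(target);
}
#endif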
1685 
1686 
1687 static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
1688 {
1689 	zval *data = &p->val;
1690 
1691 	if (with_holes) {
1692 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1693 			data = Z_INDIRECT_P(data);
1694 		}
1695 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1696 			return 0;
1697 		}
1698 	} else if (!packed) {
1699 		/* INDIRECT element may point to UNDEF-ined slots */
1700 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1701 			data = Z_INDIRECT_P(data);
1702 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1703 				return 0;
1704 			}
1705 		}
1706 	}
1707 
1708 	do {
1709 		if (Z_OPT_REFCOUNTED_P(data)) {
1710 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
1711 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
1712 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
1713 				data = Z_REFVAL_P(data);
1714 				if (!Z_OPT_REFCOUNTED_P(data)) {
1715 					break;
1716 				}
1717 			}
1718 			Z_ADDREF_P(data);
1719 		}
1720 	} while (0);
1721 	ZVAL_COPY_VALUE(&q->val, data);
1722 
1723 	q->h = p->h;
1724 	if (packed) {
1725 		q->key = NULL;
1726 	} else {
1727 		uint32_t nIndex;
1728 
1729 		q->key = p->key;
1730 		if (!static_keys && q->key) {
1731 			zend_string_addref(q->key);
1732 		}
1733 
1734 		nIndex = q->h | target->nTableMask;
1735 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
1736 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
1737 	}
1738 	return 1;
1739 }
1740 
1741 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
1742 {
1743 	Bucket *p = source->arData;
1744 	Bucket *q = target->arData;
1745 	Bucket *end = p + source->nNumUsed;
1746 
1747 	do {
1748 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
1749 			if (with_holes) {
1750 				ZVAL_UNDEF(&q->val);
1751 			}
1752 		}
1753 		p++; q++;
1754 	} while (p != end);
1755 }
1756 
1757 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
1758 {
1759 	uint32_t idx = 0;
1760 	Bucket *p = source->arData;
1761 	Bucket *q = target->arData;
1762 	Bucket *end = p + source->nNumUsed;
1763 
1764 	do {
1765 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
1766 			uint32_t target_idx = idx;
1767 
1768 			idx++; p++;
1769 			while (p != end) {
1770 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
1771 					if (source->nInternalPointer == idx) {
1772 						target->nInternalPointer = target_idx;
1773 					}
1774 					target_idx++; q++;
1775 				}
1776 				idx++; p++;
1777 			}
1778 			return target_idx;
1779 		}
1780 		idx++; p++; q++;
1781 	} while (p != end);
1782 	return idx;
1783 }
1784 
1785 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
1786 {
1787 	uint32_t idx;
1788 	HashTable *target;
1789 
1790 	IS_CONSISTENT(source);
1791 
1792 	ALLOC_HASHTABLE(target);
1793 	GC_REFCOUNT(target) = 1;
1794 	GC_TYPE_INFO(target) = IS_ARRAY;
1795 
1796 	target->nTableSize = source->nTableSize;
1797 	target->pDestructor = source->pDestructor;
1798 
1799 	if (source->nNumUsed == 0) {
1800 		target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
1801 		target->nTableMask = HT_MIN_MASK;
1802 		target->nNumUsed = 0;
1803 		target->nNumOfElements = 0;
1804 		target->nNextFreeElement = 0;
1805 		target->nInternalPointer = HT_INVALID_IDX;
1806 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
1807 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
1808 		target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
1809 		target->nTableMask = source->nTableMask;
1810 		target->nNumUsed = source->nNumUsed;
1811 		target->nNumOfElements = source->nNumOfElements;
1812 		target->nNextFreeElement = source->nNextFreeElement;
1813 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1814 		target->nInternalPointer = source->nInternalPointer;
1815 		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
1816 		if (target->nNumOfElements > 0 &&
1817 		    target->nInternalPointer == HT_INVALID_IDX) {
1818 			idx = 0;
1819 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1820 				idx++;
1821 			}
1822 			target->nInternalPointer = idx;
1823 		}
1824 	} else if (source->u.flags & HASH_FLAG_PACKED) {
1825 		target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
1826 		target->nTableMask = source->nTableMask;
1827 		target->nNumUsed = source->nNumUsed;
1828 		target->nNumOfElements = source->nNumOfElements;
1829 		target->nNextFreeElement = source->nNextFreeElement;
1830 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1831 		target->nInternalPointer = source->nInternalPointer;
1832 		HT_HASH_RESET_PACKED(target);
1833 
1834 		if (target->nNumUsed == target->nNumOfElements) {
1835 			zend_array_dup_packed_elements(source, target, 0);
1836 		} else {
1837 			zend_array_dup_packed_elements(source, target, 1);
1838 		}
1839 		if (target->nNumOfElements > 0 &&
1840 		    target->nInternalPointer == HT_INVALID_IDX) {
1841 			idx = 0;
1842 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1843 				idx++;
1844 			}
1845 			target->nInternalPointer = idx;
1846 		}
1847 	} else {
1848 		target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
1849 		target->nTableMask = source->nTableMask;
1850 		target->nNextFreeElement = source->nNextFreeElement;
1851 		target->nInternalPointer = source->nInternalPointer;
1852 
1853 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1854 		HT_HASH_RESET(target);
1855 
1856 		if (target->u.flags & HASH_FLAG_STATIC_KEYS) {
1857 			if (source->nNumUsed == source->nNumOfElements) {
1858 				idx = zend_array_dup_elements(source, target, 1, 0);
1859 			} else {
1860 				idx = zend_array_dup_elements(source, target, 1, 1);
1861 			}
1862 		} else {
1863 			if (source->nNumUsed == source->nNumOfElements) {
1864 				idx = zend_array_dup_elements(source, target, 0, 0);
1865 			} else {
1866 				idx = zend_array_dup_elements(source, target, 0, 1);
1867 			}
1868 		}
1869 		target->nNumUsed = idx;
1870 		target->nNumOfElements = idx;
1871 		if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
1872 			target->nInternalPointer = 0;
1873 		}
1874 	}
1875 	return target;
1876 }
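/*
 * Illustrative sketch (not part of the original file): duplicating a table with
 * zend_array_dup().  The duplicate gets its own bucket storage; refcounted
 * values inside are shared via addref, as implemented in
 * zend_array_dup_element() above.  Function name is an assumption.
 */
#if 0
static void example_dup(HashTable *source)
{
	HashTable *copy = zend_array_dup(source);

	/* ... mutate `copy` freely without affecting `source` ... */

	zend_array_destroy(copy);
}
#endif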
1877 
1878 
1879 ZEND_API void ZEND_FASTCALL _zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite ZEND_FILE_LINE_DC)
1880 {
1881 	uint32_t idx;
1882 	Bucket *p;
1883 	zval *t;
1884 
1885 	IS_CONSISTENT(source);
1886 	IS_CONSISTENT(target);
1887 	HT_ASSERT(GC_REFCOUNT(target) == 1);
1888 
1889 	if (overwrite) {
1890 		for (idx = 0; idx < source->nNumUsed; idx++) {
1891 			p = source->arData + idx;
1892 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1893 			if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1894 			    UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1895 			    continue;
1896 			}
1897 			if (p->key) {
1898 				t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1899 				if (t && pCopyConstructor) {
1900 					pCopyConstructor(t);
1901 				}
1902 			} else {
1903 				t = zend_hash_index_update(target, p->h, &p->val);
1904 				if (t && pCopyConstructor) {
1905 					pCopyConstructor(t);
1906 				}
1907 			}
1908 		}
1909 	} else {
1910 		for (idx = 0; idx < source->nNumUsed; idx++) {
1911 			p = source->arData + idx;
1912 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1913 			if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1914 			    UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1915 			    continue;
1916 			}
1917 			if (p->key) {
1918 				t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_ADD | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1919 				if (t && pCopyConstructor) {
1920 					pCopyConstructor(t);
1921 				}
1922 			} else {
1923 				t = zend_hash_index_add(target, p->h, &p->val);
1924 				if (t && pCopyConstructor) {
1925 					pCopyConstructor(t);
1926 				}
1927 			}
1928 		}
1929 	}
1930 	if (target->nNumOfElements > 0) {
1931 		idx = 0;
1932 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1933 			idx++;
1934 		}
1935 		target->nInternalPointer = idx;
1936 	}
1937 }
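/*
 * Illustrative sketch (not part of the original file): merging `source` into
 * `target`, assuming the zend_hash_merge() wrapper macro from zend_hash.h that
 * expands to _zend_hash_merge() with file/line relay arguments.  With
 * overwrite = 0 existing keys in `target` win (HASH_ADD path above); with
 * overwrite = 1 entries from `source` replace them (HASH_UPDATE path).
 */
#if 0
static void example_merge(HashTable *target, HashTable *source)
{
	/* keep existing entries in `target` */
	zend_hash_merge(target, source, (copy_ctor_func_t) zval_add_ref, 0);

	/* or let `source` override duplicates:
	zend_hash_merge(target, source, (copy_ctor_func_t) zval_add_ref, 1); */
}
#endif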
1938 
1939 
1940 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
1941 {
1942 	zend_hash_key hash_key;
1943 
1944 	hash_key.h = p->h;
1945 	hash_key.key = p->key;
1946 	return merge_checker_func(target, source_data, &hash_key, pParam);
1947 }
1948 
1949 
1950 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
1951 {
1952 	uint32_t idx;
1953 	Bucket *p;
1954 	zval *t;
1955 
1956 	IS_CONSISTENT(source);
1957 	IS_CONSISTENT(target);
1958 	HT_ASSERT(GC_REFCOUNT(target) == 1);
1959 
1960 	for (idx = 0; idx < source->nNumUsed; idx++) {
1961 		p = source->arData + idx;
1962 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1963 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
1964 			t = zend_hash_update(target, p->key, &p->val);
1965 			if (t && pCopyConstructor) {
1966 				pCopyConstructor(t);
1967 			}
1968 		}
1969 	}
1970 	if (target->nNumOfElements > 0) {
1971 		idx = 0;
1972 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1973 			idx++;
1974 		}
1975 		target->nInternalPointer = idx;
1976 	}
1977 }
1978 
1979 
1980 /* Returns the hash table data if found and NULL if not. */
1981 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
1982 {
1983 	Bucket *p;
1984 
1985 	IS_CONSISTENT(ht);
1986 
1987 	p = zend_hash_find_bucket(ht, key);
1988 	return p ? &p->val : NULL;
1989 }
1990 
1991 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
1992 {
1993 	zend_ulong h;
1994 	Bucket *p;
1995 
1996 	IS_CONSISTENT(ht);
1997 
1998 	h = zend_inline_hash_func(str, len);
1999 	p = zend_hash_str_find_bucket(ht, str, len, h);
2000 	return p ? &p->val : NULL;
2001 }
2002 
2003 ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
2004 {
2005 	Bucket *p;
2006 
2007 	IS_CONSISTENT(ht);
2008 
2009 	p = zend_hash_find_bucket(ht, key);
2010 	return p ? 1 : 0;
2011 }
2012 
2013 ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
2014 {
2015 	zend_ulong h;
2016 	Bucket *p;
2017 
2018 	IS_CONSISTENT(ht);
2019 
2020 	h = zend_inline_hash_func(str, len);
2021 	p = zend_hash_str_find_bucket(ht, str, len, h);
2022 	return p ? 1 : 0;
2023 }
2024 
2025 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2026 {
2027 	Bucket *p;
2028 
2029 	IS_CONSISTENT(ht);
2030 
2031 	if (ht->u.flags & HASH_FLAG_PACKED) {
2032 		if (h < ht->nNumUsed) {
2033 			p = ht->arData + h;
2034 			if (Z_TYPE(p->val) != IS_UNDEF) {
2035 				return &p->val;
2036 			}
2037 		}
2038 		return NULL;
2039 	}
2040 
2041 	p = zend_hash_index_find_bucket(ht, h);
2042 	return p ? &p->val : NULL;
2043 }
2044 
2045 
2046 ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
2047 {
2048 	Bucket *p;
2049 
2050 	IS_CONSISTENT(ht);
2051 
2052 	if (ht->u.flags & HASH_FLAG_PACKED) {
2053 		if (h < ht->nNumUsed) {
2054 			if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
2055 				return 1;
2056 			}
2057 		}
2058 		return 0;
2059 	}
2060 
2061 	p = zend_hash_index_find_bucket(ht, h);
2062 	return p ? 1 : 0;
2063 }
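/*
 * Illustrative sketch (not part of the original file): the lookup flavours
 * defined above.  zend_hash_find() takes a zend_string key,
 * zend_hash_str_find() a plain char buffer plus length, and
 * zend_hash_index_find() a numeric index (with the fast path for packed
 * arrays).  Key and table names are assumptions for the example.
 */
#if 0
static void example_lookups(HashTable *ht)
{
	zend_string *key = zend_string_init("name", sizeof("name") - 1, 0);
	zval *val;

	val = zend_hash_find(ht, key);                        /* NULL if "name" is absent */
	val = zend_hash_str_find(ht, "name", sizeof("name") - 1);
	val = zend_hash_index_find(ht, 42);                   /* numeric key lookup */

	if (zend_hash_index_exists(ht, 42)) {
		/* ... */
	}
	(void) val;
	zend_string_release(key);
}
#endif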
2064 
2065 
2066 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2067 {
2068 	uint32_t idx;
2069 
2070 	IS_CONSISTENT(ht);
2071 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2072 
2073 	for (idx = 0; idx < ht->nNumUsed; idx++) {
2074 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2075 			*pos = idx;
2076 			return;
2077 		}
2078 	}
2079 	*pos = HT_INVALID_IDX;
2080 }
2081 
2082 
2083 /* This function could be greatly optimized by remembering
2084  * the end of the list
2085  */
2086 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2087 {
2088 	uint32_t idx;
2089 
2090 	IS_CONSISTENT(ht);
2091 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2092 
2093 	idx = ht->nNumUsed;
2094 	while (idx > 0) {
2095 		idx--;
2096 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2097 			*pos = idx;
2098 			return;
2099 		}
2100 	}
2101 	*pos = HT_INVALID_IDX;
2102 }
2103 
2104 
2105 ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2106 {
2107 	uint32_t idx = *pos;
2108 
2109 	IS_CONSISTENT(ht);
2110 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2111 
2112 	if (idx != HT_INVALID_IDX) {
2113 		while (1) {
2114 			idx++;
2115 			if (idx >= ht->nNumUsed) {
2116 				*pos = HT_INVALID_IDX;
2117 				return SUCCESS;
2118 			}
2119 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2120 				*pos = idx;
2121 				return SUCCESS;
2122 			}
2123 		}
2124 	} else {
2125 		return FAILURE;
2126 	}
2127 }
2128 
2129 ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2130 {
2131 	uint32_t idx = *pos;
2132 
2133 	IS_CONSISTENT(ht);
2134 	HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2135 
2136 	if (idx != HT_INVALID_IDX) {
2137 		while (idx > 0) {
2138 			idx--;
2139 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2140 				*pos = idx;
2141 				return SUCCESS;
2142 			}
2143 		}
2144 		*pos = HT_INVALID_IDX;
2145 		return SUCCESS;
2146 	} else {
2147 		return FAILURE;
2148 	}
2149 }
2150 
2151 
2152 /* This function should be made binary safe  */
2153 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2154 {
2155 	uint32_t idx = *pos;
2156 	Bucket *p;
2157 
2158 	IS_CONSISTENT(ht);
2159 	if (idx != HT_INVALID_IDX) {
2160 		p = ht->arData + idx;
2161 		if (p->key) {
2162 			*str_index = p->key;
2163 			return HASH_KEY_IS_STRING;
2164 		} else {
2165 			*num_index = p->h;
2166 			return HASH_KEY_IS_LONG;
2167 		}
2168 	}
2169 	return HASH_KEY_NON_EXISTENT;
2170 }
2171 
2172 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2173 {
2174 	uint32_t idx = *pos;
2175 	Bucket *p;
2176 
2177 	IS_CONSISTENT(ht);
2178 	if (idx == HT_INVALID_IDX) {
2179 		ZVAL_NULL(key);
2180 	} else {
2181 		p = ht->arData + idx;
2182 		if (p->key) {
2183 			ZVAL_STR_COPY(key, p->key);
2184 		} else {
2185 			ZVAL_LONG(key, p->h);
2186 		}
2187 	}
2188 }
2189 
2190 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2191 {
2192 	uint32_t idx = *pos;
2193 	Bucket *p;
2194 
2195 	IS_CONSISTENT(ht);
2196 	if (idx != HT_INVALID_IDX) {
2197 		p = ht->arData + idx;
2198 		if (p->key) {
2199 			return HASH_KEY_IS_STRING;
2200 		} else {
2201 			return HASH_KEY_IS_LONG;
2202 		}
2203 	}
2204 	return HASH_KEY_NON_EXISTENT;
2205 }
2206 
2207 
2208 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2209 {
2210 	uint32_t idx = *pos;
2211 	Bucket *p;
2212 
2213 	IS_CONSISTENT(ht);
2214 	if (idx != HT_INVALID_IDX) {
2215 		p = ht->arData + idx;
2216 		return &p->val;
2217 	} else {
2218 		return NULL;
2219 	}
2220 }
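/*
 * Illustrative sketch (not part of the original file): a manual forward walk
 * over a table using an external HashPosition, built only from the
 * reset/get_current/move_forward primitives defined above.
 */
#if 0
static void example_iterate(HashTable *ht)
{
	HashPosition pos;
	zval *val;
	zend_string *str_key;
	zend_ulong num_key;

	zend_hash_internal_pointer_reset_ex(ht, &pos);
	while ((val = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
		switch (zend_hash_get_current_key_ex(ht, &str_key, &num_key, &pos)) {
			case HASH_KEY_IS_STRING:
				/* string key available in str_key */
				break;
			case HASH_KEY_IS_LONG:
				/* numeric key available in num_key */
				break;
		}
		zend_hash_move_forward_ex(ht, &pos);
	}
}
#endif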
2221 
2222 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2223 {
2224 	zval val;
2225 	zend_ulong h;
2226 	zend_string *key;
2227 
2228 	ZVAL_COPY_VALUE(&val, &p->val);
2229 	h = p->h;
2230 	key = p->key;
2231 
2232 	ZVAL_COPY_VALUE(&p->val, &q->val);
2233 	p->h = q->h;
2234 	p->key = q->key;
2235 
2236 	ZVAL_COPY_VALUE(&q->val, &val);
2237 	q->h = h;
2238 	q->key = key;
2239 }
2240 
2241 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2242 {
2243 	zval val;
2244 
2245 	ZVAL_COPY_VALUE(&val, &p->val);
2246 	ZVAL_COPY_VALUE(&p->val, &q->val);
2247 	ZVAL_COPY_VALUE(&q->val, &val);
2248 }
2249 
2250 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2251 {
2252 	zval val;
2253 	zend_ulong h;
2254 
2255 	ZVAL_COPY_VALUE(&val, &p->val);
2256 	h = p->h;
2257 
2258 	ZVAL_COPY_VALUE(&p->val, &q->val);
2259 	p->h = q->h;
2260 
2261 	ZVAL_COPY_VALUE(&q->val, &val);
2262 	q->h = h;
2263 }
2264 
2265 ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
2266 {
2267 	Bucket *p;
2268 	uint32_t i, j;
2269 
2270 	IS_CONSISTENT(ht);
2271 	HT_ASSERT(GC_REFCOUNT(ht) == 1);
2272 
2273 	if (!(ht->nNumOfElements > 1) && !(renumber && ht->nNumOfElements > 0)) { /* Doesn't require sorting */
2274 		return SUCCESS;
2275 	}
2276 
2277 	if (ht->nNumUsed == ht->nNumOfElements) {
2278 		i = ht->nNumUsed;
2279 	} else {
2280 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2281 			p = ht->arData + j;
2282 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2283 			if (i != j) {
2284 				ht->arData[i] = *p;
2285 			}
2286 			i++;
2287 		}
2288 	}
2289 
2290 	sort((void *)ht->arData, i, sizeof(Bucket), compar,
2291 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2292 				((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2293 
2294 	HANDLE_BLOCK_INTERRUPTIONS();
2295 	ht->nNumUsed = i;
2296 	ht->nInternalPointer = 0;
2297 
2298 	if (renumber) {
2299 		for (j = 0; j < i; j++) {
2300 			p = ht->arData + j;
2301 			p->h = j;
2302 			if (p->key) {
2303 				zend_string_release(p->key);
2304 				p->key = NULL;
2305 			}
2306 		}
2307 
2308 		ht->nNextFreeElement = i;
2309 	}
2310 	if (ht->u.flags & HASH_FLAG_PACKED) {
2311 		if (!renumber) {
2312 			zend_hash_packed_to_hash(ht);
2313 		}
2314 	} else {
2315 		if (renumber) {
2316 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2317 			Bucket *old_buckets = ht->arData;
2318 
2319 			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht->u.flags & HASH_FLAG_PERSISTENT));
2320 			ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2321 			ht->nTableMask = HT_MIN_MASK;
2322 			HT_SET_DATA_ADDR(ht, new_data);
2323 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2324 			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
2325 			HT_HASH_RESET_PACKED(ht);
2326 		} else {
2327 			zend_hash_rehash(ht);
2328 		}
2329 	}
2330 
2331 	HANDLE_UNBLOCK_INTERRUPTIONS();
2332 
2333 	return SUCCESS;
2334 }
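/*
 * Illustrative sketch (not part of the original file): sorting a table of
 * IS_LONG values.  The comparator receives Bucket pointers; renumber = 1 drops
 * string keys and reindexes 0..n-1, converting the table to packed form as
 * implemented above.  Passing zend_sort() (zend_sort.h) as the sort_func_t is
 * an assumption about the typical caller.
 */
#if 0
static int example_compare_long(const void *a, const void *b)
{
	const Bucket *f = (const Bucket *) a;
	const Bucket *s = (const Bucket *) b;

	if (Z_LVAL(f->val) == Z_LVAL(s->val)) {
		return 0;
	}
	return Z_LVAL(f->val) < Z_LVAL(s->val) ? -1 : 1;
}

/* usage: zend_hash_sort_ex(ht, zend_sort, example_compare_long, 1); */
#endif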
2335 
2336 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
2337 	uint32_t idx1, idx2;
2338 
2339 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2340 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2341 	}
2342 
2343 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2344 		Bucket *p1 = ht1->arData + idx1, *p2;
2345 		zval *pData1, *pData2;
2346 		int result;
2347 
2348 		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2349 		if (ordered) {
2350 			while (1) {
2351 				ZEND_ASSERT(idx2 != ht2->nNumUsed);
2352 				p2 = ht2->arData + idx2;
2353 				if (Z_TYPE(p2->val) != IS_UNDEF) break;
2354 				idx2++;
2355 			}
2356 			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
2357 				if (p1->h != p2->h) {
2358 					return p1->h > p2->h ? 1 : -1;
2359 				}
2360 			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
2361 				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2362 					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2363 				}
2364 
2365 				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2366 				if (result != 0) {
2367 					return result;
2368 				}
2369 			} else {
2370 				/* Mixed key types: A string key is considered as larger */
2371 				return p1->key != NULL ? 1 : -1;
2372 			}
2373 			pData2 = &p2->val;
2374 			idx2++;
2375 		} else {
2376 			if (p1->key == NULL) { /* numeric index */
2377 				pData2 = zend_hash_index_find(ht2, p1->h);
2378 				if (pData2 == NULL) {
2379 					return 1;
2380 				}
2381 			} else { /* string index */
2382 				pData2 = zend_hash_find(ht2, p1->key);
2383 				if (pData2 == NULL) {
2384 					return 1;
2385 				}
2386 			}
2387 		}
2388 
2389 		pData1 = &p1->val;
2390 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2391 			pData1 = Z_INDIRECT_P(pData1);
2392 		}
2393 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2394 			pData2 = Z_INDIRECT_P(pData2);
2395 		}
2396 
2397 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
2398 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
2399 				return -1;
2400 			}
2401 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2402 			return 1;
2403 		} else {
2404 			result = compar(pData1, pData2);
2405 			if (result != 0) {
2406 				return result;
2407 			}
2408 		}
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
2415 {
2416 	int result;
2417 	IS_CONSISTENT(ht1);
2418 	IS_CONSISTENT(ht2);
2419 
2420 	HASH_PROTECT_RECURSION(ht1);
2421 	HASH_PROTECT_RECURSION(ht2);
2422 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2423 	HASH_UNPROTECT_RECURSION(ht1);
2424 	HASH_UNPROTECT_RECURSION(ht2);
2425 
2426 	return result;
2427 }
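/*
 * Illustrative sketch (not part of the original file): comparing two tables
 * element-wise.  With ordered = 1 keys must match pairwise in iteration order;
 * with ordered = 0 each key of ht1 is looked up in ht2.  The zval comparator
 * below is a simplified assumption for the example.
 */
#if 0
static int example_compare_zvals(const void *a, const void *b)
{
	const zval *z1 = (const zval *) a;
	const zval *z2 = (const zval *) b;

	if (Z_TYPE_P(z1) != Z_TYPE_P(z2)) {
		return Z_TYPE_P(z1) < Z_TYPE_P(z2) ? -1 : 1;
	}
	if (Z_TYPE_P(z1) == IS_LONG) {
		return Z_LVAL_P(z1) == Z_LVAL_P(z2) ? 0 : (Z_LVAL_P(z1) < Z_LVAL_P(z2) ? -1 : 1);
	}
	return 0; /* other types treated as equal in this sketch */
}

static int example_tables_equal(HashTable *ht1, HashTable *ht2)
{
	return zend_hash_compare(ht1, ht2, example_compare_zvals, 1) == 0;
}
#endif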
2428 
2429 
2430 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
2431 {
2432 	uint32_t idx;
2433 	Bucket *p, *res;
2434 
2435 	IS_CONSISTENT(ht);
2436 
2437 	if (ht->nNumOfElements == 0) {
2438 		return NULL;
2439 	}
2440 
2441 	idx = 0;
2442 	while (1) {
2443 		if (idx == ht->nNumUsed) {
2444 			return NULL;
2445 		}
2446 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2447 		idx++;
2448 	}
2449 	res = ht->arData + idx;
2450 	for (; idx < ht->nNumUsed; idx++) {
2451 		p = ht->arData + idx;
2452 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2453 
2454 		if (flag) {
2455 			if (compar(res, p) < 0) { /* max */
2456 				res = p;
2457 			}
2458 		} else {
2459 			if (compar(res, p) > 0) { /* min */
2460 				res = p;
2461 			}
2462 		}
2463 	}
2464 	return &res->val;
2465 }
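/*
 * Illustrative sketch (not part of the original file): picking the element with
 * the largest IS_LONG value.  The comparator receives Bucket pointers;
 * flag = 1 selects the maximum, flag = 0 the minimum, and NULL is returned for
 * an empty table.  Reuses the comparator from the sorting sketch above.
 */
#if 0
static zval *example_max(HashTable *ht)
{
	return zend_hash_minmax(ht, example_compare_long, 1 /* max */);
}
#endif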
2466 
2467 ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2468 {
2469 	register const char *tmp = key;
2470 
2471 	const char *end = key + length;
2472 
2473 	if (*tmp == '-') {
2474 		tmp++;
2475 	}
2476 
2477 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
2478 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
2479 	 || (SIZEOF_ZEND_LONG == 4 &&
2480 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2481 	     *tmp > '2')) { /* overflow */
2482 		return 0;
2483 	}
2484 	*idx = (*tmp - '0');
2485 	while (1) {
2486 		++tmp;
2487 		if (tmp == end) {
2488 			if (*key == '-') {
2489 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
2490 					return 0;
2491 				}
2492 				*idx = 0 - *idx;
2493 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
2494 				return 0;
2495 			}
2496 			return 1;
2497 		}
2498 		if (*tmp <= '9' && *tmp >= '0') {
2499 			*idx = (*idx * 10) + (*tmp - '0');
2500 		} else {
2501 			return 0;
2502 		}
2503 	}
2504 }
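/*
 * Illustrative sketch (not part of the original file): how a textual key is
 * canonicalised to an integer index by the function above.  "123" becomes the
 * numeric key 123 (so $a["123"] and $a[123] hit the same bucket), while
 * "0123", "1e3" or out-of-range values keep their string identity.  Function
 * name is an assumption for the example.
 */
#if 0
static void example_numeric_key(HashTable *ht, zval *value)
{
	zend_ulong idx;

	if (_zend_handle_numeric_str_ex("123", sizeof("123") - 1, &idx)) {
		zend_hash_index_update(ht, idx, value);   /* idx == 123 */
	} else {
		/* not a canonical decimal integer: keep it as a string key */
	}
}
#endif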
2505 
2506 /*
2507  * Local variables:
2508  * tab-width: 4
2509  * c-basic-offset: 4
2510  * indent-tabs-mode: t
2511  * End:
2512  */
2513