xref: /PHP-7.2/Zend/zend_hash.c (revision c7a86a38)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) 1998-2018 Zend Technologies Ltd. (http://www.zend.com) |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@zend.com>                                |
16    |          Zeev Suraski <zeev@zend.com>                                |
17    |          Dmitry Stogov <dmitry@zend.com>                             |
18    +----------------------------------------------------------------------+
19 */
20 
21 /* $Id$ */
22 
23 #include "zend.h"
24 #include "zend_globals.h"
25 #include "zend_variables.h"
26 
27 #if ZEND_DEBUG
28 # define HT_ASSERT(ht, expr) \
29 	ZEND_ASSERT((expr) || ((ht)->u.flags & HASH_FLAG_ALLOW_COW_VIOLATION))
30 #else
31 # define HT_ASSERT(ht, expr)
32 #endif
33 
34 #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
35 
36 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
37 
38 #if ZEND_DEBUG
39 
40 #define HT_OK					0x00
41 #define HT_IS_DESTROYING		0x40
42 #define HT_DESTROYED			0x80
43 #define HT_CLEANING				0xc0
44 
45 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
46 {
47 	if (ht->u.v.consistency == HT_OK) {
48 		return;
49 	}
50 	switch (ht->u.v.consistency) {
51 		case HT_IS_DESTROYING:
52 			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
53 			break;
54 		case HT_DESTROYED:
55 			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
56 			break;
57 		case HT_CLEANING:
58 			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
59 			break;
60 		default:
61 			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
62 			break;
63 	}
64 	ZEND_ASSERT(0);
65 }
66 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
67 #define SET_INCONSISTENT(n) do { \
68 		(ht)->u.v.consistency = n; \
69 	} while (0)
70 #else
71 #define IS_CONSISTENT(a)
72 #define SET_INCONSISTENT(n)
73 #endif
74 
75 #define HASH_PROTECT_RECURSION(ht)														\
76 	if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) {									\
77 		if (((ht)->u.flags & ZEND_HASH_APPLY_COUNT_MASK) >= (3 << 8)) {												\
78 			zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");\
79 		}																				\
80 		ZEND_HASH_INC_APPLY_COUNT(ht);													\
81 	}
82 
83 #define HASH_UNPROTECT_RECURSION(ht)													\
84 	if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) {									\
85 		ZEND_HASH_DEC_APPLY_COUNT(ht);													\
86 	}
87 
88 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
89 	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
90 		zend_hash_do_resize(ht);					\
91 	}
92 
93 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
94 
95 static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
96 {
97 #if defined(ZEND_WIN32)
98 	unsigned long index;
99 #endif
100 
101 	/* Use big enough power of 2 */
102 	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
103 	if (nSize < HT_MIN_SIZE) {
104 		nSize = HT_MIN_SIZE;
105 	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
106 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
107 	}
108 
109 #if defined(ZEND_WIN32)
110 	if (BitScanReverse(&index, nSize - 1)) {
111 		return 0x2 << ((31 - index) ^ 0x1f);
112 	} else {
113 		/* nSize is ensured to be in the valid range, fall back to it
114 		   rather than using an undefined bit scan result. */
115 		return nSize;
116 	}
117 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
118 	return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
119 #else
120 	nSize -= 1;
121 	nSize |= (nSize >> 1);
122 	nSize |= (nSize >> 2);
123 	nSize |= (nSize >> 4);
124 	nSize |= (nSize >> 8);
125 	nSize |= (nSize >> 16);
126 	return nSize + 1;
127 #endif
128 }
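/* Illustrative worked examples, not part of the original source:
 * zend_hash_check_size() rounds the requested size up to the next power of
 * two and clamps it to the [HT_MIN_SIZE, HT_MAX_SIZE) range, e.g.
 *   zend_hash_check_size(0)  -> 8    (clamped up to HT_MIN_SIZE)
 *   zend_hash_check_size(8)  -> 8    (already a power of two)
 *   zend_hash_check_size(9)  -> 16   (next power of two)
 * Requests of HT_MAX_SIZE or more abort with the overflow error above. */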
129 
130 static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
131 {
132 	HT_ASSERT_RC1(ht);
133 	ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
134 	if (packed) {
135 		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
136 		(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
137 		HT_HASH_RESET_PACKED(ht);
138 	} else {
139 		(ht)->nTableMask = -(ht)->nTableSize;
140 		HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
141 		(ht)->u.flags |= HASH_FLAG_INITIALIZED;
142 		if (EXPECTED(ht->nTableMask == (uint32_t)-8)) {
143 			Bucket *arData = ht->arData;
144 
145 			HT_HASH_EX(arData, -8) = -1;
146 			HT_HASH_EX(arData, -7) = -1;
147 			HT_HASH_EX(arData, -6) = -1;
148 			HT_HASH_EX(arData, -5) = -1;
149 			HT_HASH_EX(arData, -4) = -1;
150 			HT_HASH_EX(arData, -3) = -1;
151 			HT_HASH_EX(arData, -2) = -1;
152 			HT_HASH_EX(arData, -1) = -1;
153 		} else {
154 			HT_HASH_RESET(ht);
155 		}
156 	}
157 }
158 
159 static zend_always_inline void zend_hash_check_init(HashTable *ht, int packed)
160 {
161 	HT_ASSERT_RC1(ht);
162 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
163 		zend_hash_real_init_ex(ht, packed);
164 	}
165 }
166 
167 #define CHECK_INIT(ht, packed) \
168 	zend_hash_check_init(ht, packed)
169 
170 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
171 	{HT_INVALID_IDX, HT_INVALID_IDX};
172 
173 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
174 {
175 	GC_REFCOUNT(ht) = 1;
176 	GC_TYPE_INFO(ht) = IS_ARRAY | (persistent ? 0 : (GC_COLLECTABLE << GC_FLAGS_SHIFT));
177 	ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
178 	ht->nTableMask = HT_MIN_MASK;
179 	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
180 	ht->nNumUsed = 0;
181 	ht->nNumOfElements = 0;
182 	ht->nInternalPointer = HT_INVALID_IDX;
183 	ht->nNextFreeElement = 0;
184 	ht->pDestructor = pDestructor;
185 	ht->nTableSize = zend_hash_check_size(nSize);
186 }
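/* Minimal usage sketch, not part of the original source; the variable name is
 * hypothetical. Callers normally go through the zend_hash_init() macro, which
 * forwards to _zend_hash_init():
 *
 *     HashTable ht;
 *     zend_hash_init(&ht, 8, NULL, ZVAL_PTR_DTOR, 0);
 *
 * No buckets are allocated yet: arData points at uninitialized_bucket until
 * the first insertion triggers zend_hash_real_init_ex(). */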
187 
188 static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
189 {
190 	HT_ASSERT_RC1(ht);
191 	if (ht->nTableSize >= HT_MAX_SIZE) {
192 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
193 	}
194 	ht->nTableSize += ht->nTableSize;
195 	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
196 }
197 
198 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
199 {
200 	IS_CONSISTENT(ht);
201 
202 	HT_ASSERT_RC1(ht);
203 	zend_hash_real_init_ex(ht, packed);
204 }
205 
206 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
207 {
208 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
209 	Bucket *old_buckets = ht->arData;
210 
211 	HT_ASSERT_RC1(ht);
212 	ht->u.flags &= ~HASH_FLAG_PACKED;
213 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
214 	ht->nTableMask = -ht->nTableSize;
215 	HT_SET_DATA_ADDR(ht, new_data);
216 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
217 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
218 	zend_hash_rehash(ht);
219 }
220 
221 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
222 {
223 	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
224 	Bucket *old_buckets = ht->arData;
225 
226 	HT_ASSERT_RC1(ht);
227 	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
228 	ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
229 	ht->nTableMask = HT_MIN_MASK;
230 	HT_SET_DATA_ADDR(ht, new_data);
231 	HT_HASH_RESET_PACKED(ht);
232 	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
233 	pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
234 }
235 
236 ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
237 {
238 	_zend_hash_init(ht, nSize, pDestructor, persistent ZEND_FILE_LINE_RELAY_CC);
239 	if (!bApplyProtection) {
240 		ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
241 	}
242 }
243 
244 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
245 {
246 	HT_ASSERT_RC1(ht);
247 	if (nSize == 0) return;
248 	if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
249 		if (nSize > ht->nTableSize) {
250 			ht->nTableSize = zend_hash_check_size(nSize);
251 		}
252 		zend_hash_check_init(ht, packed);
253 	} else {
254 		if (packed) {
255 			ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
256 			if (nSize > ht->nTableSize) {
257 				ht->nTableSize = zend_hash_check_size(nSize);
258 				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
259 			}
260 		} else {
261 			ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
262 			if (nSize > ht->nTableSize) {
263 				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
264 				Bucket *old_buckets = ht->arData;
265 				nSize = zend_hash_check_size(nSize);
266 				new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
267 				ht->nTableSize = nSize;
268 				ht->nTableMask = -ht->nTableSize;
269 				HT_SET_DATA_ADDR(ht, new_data);
270 				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
271 				pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
272 				zend_hash_rehash(ht);
273 			}
274 		}
275 	}
276 }
277 
278 static uint32_t zend_array_recalc_elements(HashTable *ht)
279 {
280        zval *val;
281        uint32_t num = ht->nNumOfElements;
282 
283 	   ZEND_HASH_FOREACH_VAL(ht, val) {
284 		   if (Z_TYPE_P(val) == IS_INDIRECT) {
285 			   if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
286 				   num--;
287 			   }
288 		   }
289        } ZEND_HASH_FOREACH_END();
290        return num;
291 }
292 /* }}} */
293 
294 ZEND_API uint32_t zend_array_count(HashTable *ht)
295 {
296 	uint32_t num;
297 	if (UNEXPECTED(ht->u.v.flags & HASH_FLAG_HAS_EMPTY_IND)) {
298 		num = zend_array_recalc_elements(ht);
299 		if (UNEXPECTED(ht->nNumOfElements == num)) {
300 			ht->u.v.flags &= ~HASH_FLAG_HAS_EMPTY_IND;
301 		}
302 	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
303 		num = zend_array_recalc_elements(ht);
304 	} else {
305 		num = zend_hash_num_elements(ht);
306 	}
307 	return num;
308 }
309 /* }}} */
310 
311 ZEND_API void ZEND_FASTCALL zend_hash_set_apply_protection(HashTable *ht, zend_bool bApplyProtection)
312 {
313 	if (bApplyProtection) {
314 		ht->u.flags |= HASH_FLAG_APPLY_PROTECTION;
315 	} else {
316 		ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
317 	}
318 }
319 
320 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
321 {
322 	HashTableIterator *iter = EG(ht_iterators);
323 	HashTableIterator *end  = iter + EG(ht_iterators_count);
324 	uint32_t idx;
325 
326 	if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
327 		ht->u.v.nIteratorsCount++;
328 	}
329 	while (iter != end) {
330 		if (iter->ht == NULL) {
331 			iter->ht = ht;
332 			iter->pos = pos;
333 			idx = iter - EG(ht_iterators);
334 			if (idx + 1 > EG(ht_iterators_used)) {
335 				EG(ht_iterators_used) = idx + 1;
336 			}
337 			return idx;
338 		}
339 		iter++;
340 	}
341 	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
342 		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
343 		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
344 	} else {
345 		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
346 	}
347 	iter = EG(ht_iterators) + EG(ht_iterators_count);
348 	EG(ht_iterators_count) += 8;
349 	iter->ht = ht;
350 	iter->pos = pos;
351 	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
352 	idx = iter - EG(ht_iterators);
353 	EG(ht_iterators_used) = idx + 1;
354 	return idx;
355 }
356 
357 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
358 {
359 	HashTableIterator *iter = EG(ht_iterators) + idx;
360 
361 	ZEND_ASSERT(idx != (uint32_t)-1);
362 	if (iter->pos == HT_INVALID_IDX) {
363 		return HT_INVALID_IDX;
364 	} else if (UNEXPECTED(iter->ht != ht)) {
365 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
366 				&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
367 			iter->ht->u.v.nIteratorsCount--;
368 		}
369 		if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
370 			ht->u.v.nIteratorsCount++;
371 		}
372 		iter->ht = ht;
373 		iter->pos = ht->nInternalPointer;
374 	}
375 	return iter->pos;
376 }
377 
378 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
379 {
380 	HashTable *ht = Z_ARRVAL_P(array);
381 	HashTableIterator *iter = EG(ht_iterators) + idx;
382 
383 	ZEND_ASSERT(idx != (uint32_t)-1);
384 	if (iter->pos == HT_INVALID_IDX) {
385 		return HT_INVALID_IDX;
386 	} else if (UNEXPECTED(iter->ht != ht)) {
387 		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
388 				&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
389 			iter->ht->u.v.nIteratorsCount--;
390 		}
391 		SEPARATE_ARRAY(array);
392 		ht = Z_ARRVAL_P(array);
393 		if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
394 			ht->u.v.nIteratorsCount++;
395 		}
396 		iter->ht = ht;
397 		iter->pos = ht->nInternalPointer;
398 	}
399 	return iter->pos;
400 }
401 
402 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
403 {
404 	HashTableIterator *iter = EG(ht_iterators) + idx;
405 
406 	ZEND_ASSERT(idx != (uint32_t)-1);
407 
408 	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
409 			&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
410 		iter->ht->u.v.nIteratorsCount--;
411 	}
412 	iter->ht = NULL;
413 
414 	if (idx == EG(ht_iterators_used) - 1) {
415 		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
416 			idx--;
417 		}
418 		EG(ht_iterators_used) = idx;
419 	}
420 }
421 
422 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
423 {
424 	HashTableIterator *iter = EG(ht_iterators);
425 	HashTableIterator *end  = iter + EG(ht_iterators_used);
426 
427 	while (iter != end) {
428 		if (iter->ht == ht) {
429 			iter->ht = HT_POISONED_PTR;
430 		}
431 		iter++;
432 	}
433 }
434 
435 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
436 {
437 	if (UNEXPECTED(ht->u.v.nIteratorsCount)) {
438 		_zend_hash_iterators_remove(ht);
439 	}
440 }
441 
442 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
443 {
444 	HashTableIterator *iter = EG(ht_iterators);
445 	HashTableIterator *end  = iter + EG(ht_iterators_used);
446 	HashPosition res = HT_INVALID_IDX;
447 
448 	while (iter != end) {
449 		if (iter->ht == ht) {
450 			if (iter->pos >= start && iter->pos < res) {
451 				res = iter->pos;
452 			}
453 		}
454 		iter++;
455 	}
456 	return res;
457 }
458 
459 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
460 {
461 	HashTableIterator *iter = EG(ht_iterators);
462 	HashTableIterator *end  = iter + EG(ht_iterators_used);
463 
464 	while (iter != end) {
465 		if (iter->ht == ht && iter->pos == from) {
466 			iter->pos = to;
467 		}
468 		iter++;
469 	}
470 }
471 
472 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key)
473 {
474 	zend_ulong h;
475 	uint32_t nIndex;
476 	uint32_t idx;
477 	Bucket *p, *arData;
478 
479 	h = zend_string_hash_val(key);
480 	arData = ht->arData;
481 	nIndex = h | ht->nTableMask;
482 	idx = HT_HASH_EX(arData, nIndex);
483 	while (EXPECTED(idx != HT_INVALID_IDX)) {
484 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
485 		if (EXPECTED(p->key == key)) { /* check for the same interned string */
486 			return p;
487 		} else if (EXPECTED(p->h == h) &&
488 		     EXPECTED(p->key) &&
489 		     EXPECTED(ZSTR_LEN(p->key) == ZSTR_LEN(key)) &&
490 		     EXPECTED(memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
491 			return p;
492 		}
493 		idx = Z_NEXT(p->val);
494 	}
495 	return NULL;
496 }
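/* Illustrative example, not part of the original source. nTableMask is the
 * negated table size, so (h | nTableMask) always yields a small negative
 * index into the hash slots stored before arData. For nTableSize = 8
 * (nTableMask = (uint32_t)-8 = 0xFFFFFFF8) and h = 13:
 *   nIndex = 13 | 0xFFFFFFF8 = 0xFFFFFFFD   (slot -3 relative to arData)
 * HT_HASH_EX(arData, nIndex) gives the head of that collision chain, which is
 * walked via Z_NEXT(p->val) until a key matches or HT_INVALID_IDX is hit. */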
497 
498 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
499 {
500 	uint32_t nIndex;
501 	uint32_t idx;
502 	Bucket *p, *arData;
503 
504 	arData = ht->arData;
505 	nIndex = h | ht->nTableMask;
506 	idx = HT_HASH_EX(arData, nIndex);
507 	while (idx != HT_INVALID_IDX) {
508 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
509 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
510 		if ((p->h == h)
511 			 && p->key
512 			 && (ZSTR_LEN(p->key) == len)
513 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
514 			return p;
515 		}
516 		idx = Z_NEXT(p->val);
517 	}
518 	return NULL;
519 }
520 
521 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
522 {
523 	uint32_t nIndex;
524 	uint32_t idx;
525 	Bucket *p, *arData;
526 
527 	arData = ht->arData;
528 	nIndex = h | ht->nTableMask;
529 	idx = HT_HASH_EX(arData, nIndex);
530 	while (idx != HT_INVALID_IDX) {
531 		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
532 		p = HT_HASH_TO_BUCKET_EX(arData, idx);
533 		if (p->h == h && !p->key) {
534 			return p;
535 		}
536 		idx = Z_NEXT(p->val);
537 	}
538 	return NULL;
539 }
540 
541 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
542 {
543 	zend_ulong h;
544 	uint32_t nIndex;
545 	uint32_t idx;
546 	Bucket *p;
547 
548 	IS_CONSISTENT(ht);
549 	HT_ASSERT_RC1(ht);
550 
551 	if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
552 		CHECK_INIT(ht, 0);
553 		goto add_to_hash;
554 	} else if (ht->u.flags & HASH_FLAG_PACKED) {
555 		zend_hash_packed_to_hash(ht);
556 	} else if ((flag & HASH_ADD_NEW) == 0) {
557 		p = zend_hash_find_bucket(ht, key);
558 
559 		if (p) {
560 			zval *data;
561 
562 			if (flag & HASH_ADD) {
563 				if (!(flag & HASH_UPDATE_INDIRECT)) {
564 					return NULL;
565 				}
566 				ZEND_ASSERT(&p->val != pData);
567 				data = &p->val;
568 				if (Z_TYPE_P(data) == IS_INDIRECT) {
569 					data = Z_INDIRECT_P(data);
570 					if (Z_TYPE_P(data) != IS_UNDEF) {
571 						return NULL;
572 					}
573 				} else {
574 					return NULL;
575 				}
576 			} else {
577 				ZEND_ASSERT(&p->val != pData);
578 				data = &p->val;
579 				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
580 					data = Z_INDIRECT_P(data);
581 				}
582 			}
583 			if (ht->pDestructor) {
584 				ht->pDestructor(data);
585 			}
586 			ZVAL_COPY_VALUE(data, pData);
587 			return data;
588 		}
589 	}
590 
591 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
592 
593 add_to_hash:
594 	idx = ht->nNumUsed++;
595 	ht->nNumOfElements++;
596 	if (ht->nInternalPointer == HT_INVALID_IDX) {
597 		ht->nInternalPointer = idx;
598 	}
599 	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
600 	p = ht->arData + idx;
601 	p->key = key;
602 	if (!ZSTR_IS_INTERNED(key)) {
603 		zend_string_addref(key);
604 		ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
605 		zend_string_hash_val(key);
606 	}
607 	p->h = h = ZSTR_H(key);
608 	ZVAL_COPY_VALUE(&p->val, pData);
609 	nIndex = h | ht->nTableMask;
610 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
611 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
612 
613 	return &p->val;
614 }
615 
616 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
617 {
618 	return _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
619 }
620 
621 ZEND_API zval* ZEND_FASTCALL _zend_hash_add(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
622 {
623 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
624 }
625 
626 ZEND_API zval* ZEND_FASTCALL _zend_hash_update(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
627 {
628 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
629 }
630 
631 ZEND_API zval* ZEND_FASTCALL _zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
632 {
633 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
634 }
635 
636 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
637 {
638 	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
639 }
640 
641 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
642 {
643 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
644 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
645 	zend_string_release(key);
646 	return ret;
647 }
648 
649 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
650 {
651 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
652 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
653 	zend_string_release(key);
654 	return ret;
655 }
656 
657 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
658 {
659 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
660 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
661 	zend_string_release(key);
662 	return ret;
663 }
664 
665 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
666 {
667 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
668 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
669 	zend_string_release(key);
670 	return ret;
671 }
672 
673 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
674 {
675 	zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
676 	zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
677 	zend_string_delref(key);
678 	return ret;
679 }
680 
681 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
682 {
683 	zval dummy;
684 
685 	ZVAL_NULL(&dummy);
686 	return zend_hash_index_add(ht, h, &dummy);
687 }
688 
689 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
690 {
691 	zval dummy;
692 
693 	ZVAL_NULL(&dummy);
694 	return zend_hash_add(ht, key, &dummy);
695 }
696 
697 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
698 {
699 	zval dummy;
700 
701 	ZVAL_NULL(&dummy);
702 	return zend_hash_str_add(ht, str, len, &dummy);
703 }
704 
705 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
706 {
707 	uint32_t nIndex;
708 	uint32_t idx;
709 	Bucket *p;
710 
711 	IS_CONSISTENT(ht);
712 	HT_ASSERT_RC1(ht);
713 
714 	if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
715 		CHECK_INIT(ht, h < ht->nTableSize);
716 		if (h < ht->nTableSize) {
717 			p = ht->arData + h;
718 			goto add_to_packed;
719 		}
720 		goto add_to_hash;
721 	} else if (ht->u.flags & HASH_FLAG_PACKED) {
722 		if (h < ht->nNumUsed) {
723 			p = ht->arData + h;
724 			if (Z_TYPE(p->val) != IS_UNDEF) {
725 				if (flag & HASH_ADD) {
726 					return NULL;
727 				}
728 				if (ht->pDestructor) {
729 					ht->pDestructor(&p->val);
730 				}
731 				ZVAL_COPY_VALUE(&p->val, pData);
732 				return &p->val;
733 			} else { /* we have to keep the order :( */
734 				goto convert_to_hash;
735 			}
736 		} else if (EXPECTED(h < ht->nTableSize)) {
737 			p = ht->arData + h;
738 		} else if ((h >> 1) < ht->nTableSize &&
739 		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
740 			zend_hash_packed_grow(ht);
741 			p = ht->arData + h;
742 		} else {
743 			goto convert_to_hash;
744 		}
745 
746 add_to_packed:
747 		/* incremental initialization of empty Buckets */
748 		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
749 			ht->nNumUsed = h + 1;
750 		} else if (h >= ht->nNumUsed) {
751 			if (h > ht->nNumUsed) {
752 				Bucket *q = ht->arData + ht->nNumUsed;
753 				while (q != p) {
754 					ZVAL_UNDEF(&q->val);
755 					q++;
756 				}
757 			}
758 			ht->nNumUsed = h + 1;
759 		}
760 		ht->nNumOfElements++;
761 		if (ht->nInternalPointer == HT_INVALID_IDX) {
762 			ht->nInternalPointer = h;
763 		}
764 		zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
765 		if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
766 			ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
767 		}
768 		p->h = h;
769 		p->key = NULL;
770 		ZVAL_COPY_VALUE(&p->val, pData);
771 
772 		return &p->val;
773 
774 convert_to_hash:
775 		zend_hash_packed_to_hash(ht);
776 	} else if ((flag & HASH_ADD_NEW) == 0) {
777 		p = zend_hash_index_find_bucket(ht, h);
778 		if (p) {
779 			if (flag & HASH_ADD) {
780 				return NULL;
781 			}
782 			ZEND_ASSERT(&p->val != pData);
783 			if (ht->pDestructor) {
784 				ht->pDestructor(&p->val);
785 			}
786 			ZVAL_COPY_VALUE(&p->val, pData);
787 			if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
788 				ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
789 			}
790 			return &p->val;
791 		}
792 	}
793 
794 	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
795 
796 add_to_hash:
797 	idx = ht->nNumUsed++;
798 	ht->nNumOfElements++;
799 	if (ht->nInternalPointer == HT_INVALID_IDX) {
800 		ht->nInternalPointer = idx;
801 	}
802 	zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
803 	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
804 		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
805 	}
806 	p = ht->arData + idx;
807 	p->h = h;
808 	p->key = NULL;
809 	nIndex = h | ht->nTableMask;
810 	ZVAL_COPY_VALUE(&p->val, pData);
811 	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
812 	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
813 
814 	return &p->val;
815 }
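/* Illustrative example, not part of the original source. In a packed table
 * the integer key is used directly as the bucket index: inserting key 5 when
 * nNumUsed == 3 first fills buckets 3 and 4 with IS_UNDEF holes, then stores
 * the value in bucket 5 and sets nNumUsed = 6 and nNextFreeElement = 6. Keys
 * far beyond the current end, or writes into an already-deleted packed slot,
 * fall through to convert_to_hash and switch the table to the hashed layout. */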
816 
817 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
818 {
819 	return _zend_hash_index_add_or_update_i(ht, h, pData, flag ZEND_FILE_LINE_RELAY_CC);
820 }
821 
822 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
823 {
824 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
825 }
826 
827 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
828 {
829 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
830 }
831 
832 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
833 {
834 	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
835 }
836 
837 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
838 {
839 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
840 }
841 
842 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
843 {
844 	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
845 }
846 
847 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
848 {
849 
850 	IS_CONSISTENT(ht);
851 	HT_ASSERT_RC1(ht);
852 
853 	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
854 		zend_hash_rehash(ht);
855 	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
856 		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
857 		uint32_t nSize = ht->nTableSize + ht->nTableSize;
858 		Bucket *old_buckets = ht->arData;
859 
860 		new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
861 		ht->nTableSize = nSize;
862 		ht->nTableMask = -ht->nTableSize;
863 		HT_SET_DATA_ADDR(ht, new_data);
864 		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
865 		pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
866 		zend_hash_rehash(ht);
867 	} else {
868 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
869 	}
870 }
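/* Illustrative arithmetic, not part of the original source. The check above
 * prefers in-place compaction when at least ~1/32 of the used slots are
 * holes: with nNumOfElements = 1000, zend_hash_rehash() is chosen once
 * nNumUsed > 1000 + (1000 >> 5) = 1031; otherwise the bucket array is doubled
 * and rehashed into the larger table. */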
871 
872 ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
873 {
874 	Bucket *p;
875 	uint32_t nIndex, i;
876 
877 	IS_CONSISTENT(ht);
878 
879 	if (UNEXPECTED(ht->nNumOfElements == 0)) {
880 		if (ht->u.flags & HASH_FLAG_INITIALIZED) {
881 			ht->nNumUsed = 0;
882 			HT_HASH_RESET(ht);
883 		}
884 		return SUCCESS;
885 	}
886 
887 	HT_HASH_RESET(ht);
888 	i = 0;
889 	p = ht->arData;
890 	if (HT_IS_WITHOUT_HOLES(ht)) {
891 		do {
892 			nIndex = p->h | ht->nTableMask;
893 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
894 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
895 			p++;
896 		} while (++i < ht->nNumUsed);
897 	} else {
898 		do {
899 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
900 				uint32_t j = i;
901 				Bucket *q = p;
902 
903 				if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
904 					while (++i < ht->nNumUsed) {
905 						p++;
906 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
907 							ZVAL_COPY_VALUE(&q->val, &p->val);
908 							q->h = p->h;
909 							nIndex = q->h | ht->nTableMask;
910 							q->key = p->key;
911 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
912 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
913 							if (UNEXPECTED(ht->nInternalPointer == i)) {
914 								ht->nInternalPointer = j;
915 							}
916 							q++;
917 							j++;
918 						}
919 					}
920 				} else {
921 					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
922 
923 					while (++i < ht->nNumUsed) {
924 						p++;
925 						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
926 							ZVAL_COPY_VALUE(&q->val, &p->val);
927 							q->h = p->h;
928 							nIndex = q->h | ht->nTableMask;
929 							q->key = p->key;
930 							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
931 							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
932 							if (UNEXPECTED(ht->nInternalPointer == i)) {
933 								ht->nInternalPointer = j;
934 							}
935 							if (UNEXPECTED(i >= iter_pos)) {
936 								do {
937 									zend_hash_iterators_update(ht, iter_pos, j);
938 									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
939 								} while (iter_pos < i);
940 							}
941 							q++;
942 							j++;
943 						}
944 					}
945 				}
946 				ht->nNumUsed = j;
947 				break;
948 			}
949 			nIndex = p->h | ht->nTableMask;
950 			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
951 			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
952 			p++;
953 		} while (++i < ht->nNumUsed);
954 	}
955 	return SUCCESS;
956 }
957 
958 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
959 {
960 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
961 		if (prev) {
962 			Z_NEXT(prev->val) = Z_NEXT(p->val);
963 		} else {
964 			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
965 		}
966 	}
967 	if (HT_IDX_TO_HASH(ht->nNumUsed - 1) == idx) {
968 		do {
969 			ht->nNumUsed--;
970 		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
971 	}
972 	ht->nNumOfElements--;
973 	if (HT_IDX_TO_HASH(ht->nInternalPointer) == idx || UNEXPECTED(ht->u.v.nIteratorsCount)) {
974 		uint32_t new_idx;
975 
976 		new_idx = idx = HT_HASH_TO_IDX(idx);
977 		while (1) {
978 			new_idx++;
979 			if (new_idx >= ht->nNumUsed) {
980 				new_idx = HT_INVALID_IDX;
981 				break;
982 			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
983 				break;
984 			}
985 		}
986 		if (ht->nInternalPointer == idx) {
987 			ht->nInternalPointer = new_idx;
988 		}
989 		zend_hash_iterators_update(ht, idx, new_idx);
990 	}
991 	if (p->key) {
992 		zend_string_release(p->key);
993 	}
994 	if (ht->pDestructor) {
995 		zval tmp;
996 		ZVAL_COPY_VALUE(&tmp, &p->val);
997 		ZVAL_UNDEF(&p->val);
998 		ht->pDestructor(&tmp);
999 	} else {
1000 		ZVAL_UNDEF(&p->val);
1001 	}
1002 }
1003 
1004 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1005 {
1006 	Bucket *prev = NULL;
1007 
1008 	if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1009 		uint32_t nIndex = p->h | ht->nTableMask;
1010 		uint32_t i = HT_HASH(ht, nIndex);
1011 
1012 		if (i != idx) {
1013 			prev = HT_HASH_TO_BUCKET(ht, i);
1014 			while (Z_NEXT(prev->val) != idx) {
1015 				i = Z_NEXT(prev->val);
1016 				prev = HT_HASH_TO_BUCKET(ht, i);
1017 			}
1018 	 	}
1019 	}
1020 
1021 	_zend_hash_del_el_ex(ht, idx, p, prev);
1022 }
1023 
1024 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1025 {
1026 	IS_CONSISTENT(ht);
1027 	HT_ASSERT_RC1(ht);
1028 	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1029 }
1030 
1031 ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1032 {
1033 	zend_ulong h;
1034 	uint32_t nIndex;
1035 	uint32_t idx;
1036 	Bucket *p;
1037 	Bucket *prev = NULL;
1038 
1039 	IS_CONSISTENT(ht);
1040 	HT_ASSERT_RC1(ht);
1041 
1042 	h = zend_string_hash_val(key);
1043 	nIndex = h | ht->nTableMask;
1044 
1045 	idx = HT_HASH(ht, nIndex);
1046 	while (idx != HT_INVALID_IDX) {
1047 		p = HT_HASH_TO_BUCKET(ht, idx);
1048 		if ((p->key == key) ||
1049 			(p->h == h &&
1050 		     p->key &&
1051 		     ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1052 		     memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1053 			_zend_hash_del_el_ex(ht, idx, p, prev);
1054 			return SUCCESS;
1055 		}
1056 		prev = p;
1057 		idx = Z_NEXT(p->val);
1058 	}
1059 	return FAILURE;
1060 }
1061 
1062 ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1063 {
1064 	zend_ulong h;
1065 	uint32_t nIndex;
1066 	uint32_t idx;
1067 	Bucket *p;
1068 	Bucket *prev = NULL;
1069 
1070 	IS_CONSISTENT(ht);
1071 	HT_ASSERT_RC1(ht);
1072 
1073 	h = zend_string_hash_val(key);
1074 	nIndex = h | ht->nTableMask;
1075 
1076 	idx = HT_HASH(ht, nIndex);
1077 	while (idx != HT_INVALID_IDX) {
1078 		p = HT_HASH_TO_BUCKET(ht, idx);
1079 		if ((p->key == key) ||
1080 			(p->h == h &&
1081 		     p->key &&
1082 		     ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1083 		     memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1084 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1085 				zval *data = Z_INDIRECT(p->val);
1086 
1087 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1088 					return FAILURE;
1089 				} else {
1090 					if (ht->pDestructor) {
1091 						zval tmp;
1092 						ZVAL_COPY_VALUE(&tmp, data);
1093 						ZVAL_UNDEF(data);
1094 						ht->pDestructor(&tmp);
1095 					} else {
1096 						ZVAL_UNDEF(data);
1097 					}
1098 					ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1099 				}
1100 			} else {
1101 				_zend_hash_del_el_ex(ht, idx, p, prev);
1102 			}
1103 			return SUCCESS;
1104 		}
1105 		prev = p;
1106 		idx = Z_NEXT(p->val);
1107 	}
1108 	return FAILURE;
1109 }
1110 
1111 ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1112 {
1113 	zend_ulong h;
1114 	uint32_t nIndex;
1115 	uint32_t idx;
1116 	Bucket *p;
1117 	Bucket *prev = NULL;
1118 
1119 	IS_CONSISTENT(ht);
1120 	HT_ASSERT_RC1(ht);
1121 
1122 	h = zend_inline_hash_func(str, len);
1123 	nIndex = h | ht->nTableMask;
1124 
1125 	idx = HT_HASH(ht, nIndex);
1126 	while (idx != HT_INVALID_IDX) {
1127 		p = HT_HASH_TO_BUCKET(ht, idx);
1128 		if ((p->h == h)
1129 			 && p->key
1130 			 && (ZSTR_LEN(p->key) == len)
1131 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1132 			if (Z_TYPE(p->val) == IS_INDIRECT) {
1133 				zval *data = Z_INDIRECT(p->val);
1134 
1135 				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1136 					return FAILURE;
1137 				} else {
1138 					if (ht->pDestructor) {
1139 						ht->pDestructor(data);
1140 					}
1141 					ZVAL_UNDEF(data);
1142 					ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1143 				}
1144 			} else {
1145 				_zend_hash_del_el_ex(ht, idx, p, prev);
1146 			}
1147 			return SUCCESS;
1148 		}
1149 		prev = p;
1150 		idx = Z_NEXT(p->val);
1151 	}
1152 	return FAILURE;
1153 }
1154 
1155 ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1156 {
1157 	zend_ulong h;
1158 	uint32_t nIndex;
1159 	uint32_t idx;
1160 	Bucket *p;
1161 	Bucket *prev = NULL;
1162 
1163 	IS_CONSISTENT(ht);
1164 	HT_ASSERT_RC1(ht);
1165 
1166 	h = zend_inline_hash_func(str, len);
1167 	nIndex = h | ht->nTableMask;
1168 
1169 	idx = HT_HASH(ht, nIndex);
1170 	while (idx != HT_INVALID_IDX) {
1171 		p = HT_HASH_TO_BUCKET(ht, idx);
1172 		if ((p->h == h)
1173 			 && p->key
1174 			 && (ZSTR_LEN(p->key) == len)
1175 			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1176 			_zend_hash_del_el_ex(ht, idx, p, prev);
1177 			return SUCCESS;
1178 		}
1179 		prev = p;
1180 		idx = Z_NEXT(p->val);
1181 	}
1182 	return FAILURE;
1183 }
1184 
1185 ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1186 {
1187 	uint32_t nIndex;
1188 	uint32_t idx;
1189 	Bucket *p;
1190 	Bucket *prev = NULL;
1191 
1192 	IS_CONSISTENT(ht);
1193 	HT_ASSERT_RC1(ht);
1194 
1195 	if (ht->u.flags & HASH_FLAG_PACKED) {
1196 		if (h < ht->nNumUsed) {
1197 			p = ht->arData + h;
1198 			if (Z_TYPE(p->val) != IS_UNDEF) {
1199 				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1200 				return SUCCESS;
1201 			}
1202 		}
1203 		return FAILURE;
1204 	}
1205 	nIndex = h | ht->nTableMask;
1206 
1207 	idx = HT_HASH(ht, nIndex);
1208 	while (idx != HT_INVALID_IDX) {
1209 		p = HT_HASH_TO_BUCKET(ht, idx);
1210 		if ((p->h == h) && (p->key == NULL)) {
1211 			_zend_hash_del_el_ex(ht, idx, p, prev);
1212 			return SUCCESS;
1213 		}
1214 		prev = p;
1215 		idx = Z_NEXT(p->val);
1216 	}
1217 	return FAILURE;
1218 }
1219 
1220 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1221 {
1222 	Bucket *p, *end;
1223 
1224 	IS_CONSISTENT(ht);
1225 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1226 
1227 	if (ht->nNumUsed) {
1228 		p = ht->arData;
1229 		end = p + ht->nNumUsed;
1230 		if (ht->pDestructor) {
1231 			SET_INCONSISTENT(HT_IS_DESTROYING);
1232 
1233 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1234 				if (HT_IS_WITHOUT_HOLES(ht)) {
1235 					do {
1236 						ht->pDestructor(&p->val);
1237 					} while (++p != end);
1238 				} else {
1239 					do {
1240 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1241 							ht->pDestructor(&p->val);
1242 						}
1243 					} while (++p != end);
1244 				}
1245 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1246 				do {
1247 					ht->pDestructor(&p->val);
1248 					if (EXPECTED(p->key)) {
1249 						zend_string_release(p->key);
1250 					}
1251 				} while (++p != end);
1252 			} else {
1253 				do {
1254 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1255 						ht->pDestructor(&p->val);
1256 						if (EXPECTED(p->key)) {
1257 							zend_string_release(p->key);
1258 						}
1259 					}
1260 				} while (++p != end);
1261 			}
1262 
1263 			SET_INCONSISTENT(HT_DESTROYED);
1264 		} else {
1265 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1266 				do {
1267 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1268 						if (EXPECTED(p->key)) {
1269 							zend_string_release(p->key);
1270 						}
1271 					}
1272 				} while (++p != end);
1273 			}
1274 		}
1275 		zend_hash_iterators_remove(ht);
1276 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1277 		return;
1278 	}
1279 	pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1280 }
1281 
1282 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1283 {
1284 	Bucket *p, *end;
1285 
1286 	IS_CONSISTENT(ht);
1287 	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);
1288 
1289 	/* break possible cycles */
1290 	GC_REMOVE_FROM_BUFFER(ht);
1291 	GC_TYPE_INFO(ht) = IS_NULL | (GC_WHITE << 16);
1292 
1293 	if (ht->nNumUsed) {
1294 		/* In some rare cases destructors of regular arrays may be changed */
1295 		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1296 			zend_hash_destroy(ht);
1297 			goto free_ht;
1298 		}
1299 
1300 		p = ht->arData;
1301 		end = p + ht->nNumUsed;
1302 		SET_INCONSISTENT(HT_IS_DESTROYING);
1303 
1304 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1305 			do {
1306 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1307 			} while (++p != end);
1308 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1309 			do {
1310 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1311 				if (EXPECTED(p->key)) {
1312 					zend_string_release(p->key);
1313 				}
1314 			} while (++p != end);
1315 		} else {
1316 			do {
1317 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1318 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1319 					if (EXPECTED(p->key)) {
1320 						zend_string_release(p->key);
1321 					}
1322 				}
1323 			} while (++p != end);
1324 		}
1325 		zend_hash_iterators_remove(ht);
1326 		SET_INCONSISTENT(HT_DESTROYED);
1327 	} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1328 		goto free_ht;
1329 	}
1330 	efree(HT_GET_DATA_ADDR(ht));
1331 free_ht:
1332 	FREE_HASHTABLE(ht);
1333 }
1334 
1335 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1336 {
1337 	Bucket *p, *end;
1338 
1339 	IS_CONSISTENT(ht);
1340 	HT_ASSERT_RC1(ht);
1341 
1342 	if (ht->nNumUsed) {
1343 		p = ht->arData;
1344 		end = p + ht->nNumUsed;
1345 		if (ht->pDestructor) {
1346 			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1347 				if (HT_IS_WITHOUT_HOLES(ht)) {
1348 					do {
1349 						ht->pDestructor(&p->val);
1350 					} while (++p != end);
1351 				} else {
1352 					do {
1353 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1354 							ht->pDestructor(&p->val);
1355 						}
1356 					} while (++p != end);
1357 				}
1358 			} else if (HT_IS_WITHOUT_HOLES(ht)) {
1359 				do {
1360 					ht->pDestructor(&p->val);
1361 					if (EXPECTED(p->key)) {
1362 						zend_string_release(p->key);
1363 					}
1364 				} while (++p != end);
1365 			} else {
1366 				do {
1367 					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1368 						ht->pDestructor(&p->val);
1369 						if (EXPECTED(p->key)) {
1370 							zend_string_release(p->key);
1371 						}
1372 					}
1373 				} while (++p != end);
1374 			}
1375 		} else {
1376 			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
1377 				if (HT_IS_WITHOUT_HOLES(ht)) {
1378 					do {
1379 						if (EXPECTED(p->key)) {
1380 							zend_string_release(p->key);
1381 						}
1382 					} while (++p != end);
1383 				} else {
1384 					do {
1385 						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1386 							if (EXPECTED(p->key)) {
1387 								zend_string_release(p->key);
1388 							}
1389 						}
1390 					} while (++p != end);
1391 				}
1392 			}
1393 		}
1394 		if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1395 			HT_HASH_RESET(ht);
1396 		}
1397 	}
1398 	ht->nNumUsed = 0;
1399 	ht->nNumOfElements = 0;
1400 	ht->nNextFreeElement = 0;
1401 	ht->nInternalPointer = HT_INVALID_IDX;
1402 }
1403 
1404 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1405 {
1406 	Bucket *p, *end;
1407 
1408 	IS_CONSISTENT(ht);
1409 	HT_ASSERT_RC1(ht);
1410 
1411 	if (ht->nNumUsed) {
1412 		p = ht->arData;
1413 		end = p + ht->nNumUsed;
1414 		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
1415 			do {
1416 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1417 			} while (++p != end);
1418 		} else if (HT_IS_WITHOUT_HOLES(ht)) {
1419 			do {
1420 				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1421 				if (EXPECTED(p->key)) {
1422 					zend_string_release(p->key);
1423 				}
1424 			} while (++p != end);
1425 		} else {
1426 			do {
1427 				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1428 					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1429 					if (EXPECTED(p->key)) {
1430 						zend_string_release(p->key);
1431 					}
1432 				}
1433 			} while (++p != end);
1434 		}
1435 		HT_HASH_RESET(ht);
1436 	}
1437 	ht->nNumUsed = 0;
1438 	ht->nNumOfElements = 0;
1439 	ht->nNextFreeElement = 0;
1440 	ht->nInternalPointer = HT_INVALID_IDX;
1441 }
1442 
1443 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1444 {
1445 	uint32_t idx;
1446 	Bucket *p;
1447 
1448 	IS_CONSISTENT(ht);
1449 	HT_ASSERT_RC1(ht);
1450 
1451 	p = ht->arData;
1452 	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1453 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1454 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1455 	}
1456 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1457 		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1458 	}
1459 
1460 	SET_INCONSISTENT(HT_DESTROYED);
1461 }
1462 
1463 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1464 {
1465 	uint32_t idx;
1466 	Bucket *p;
1467 
1468 	IS_CONSISTENT(ht);
1469 	HT_ASSERT_RC1(ht);
1470 
1471 	idx = ht->nNumUsed;
1472 	p = ht->arData + ht->nNumUsed;
1473 	while (idx > 0) {
1474 		idx--;
1475 		p--;
1476 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1477 		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1478 	}
1479 
1480 	if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1481 		pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1482 	}
1483 
1484 	SET_INCONSISTENT(HT_DESTROYED);
1485 }
1486 
1487 /* This is used to iterate over the elements of a hashtable and selectively
1488  * delete certain entries. apply_func() receives the data and decides whether
1489  * the entry should be deleted or iteration should be stopped. The following
1490  * three return codes are possible:
1491  * ZEND_HASH_APPLY_KEEP   - continue
1492  * ZEND_HASH_APPLY_STOP   - stop iteration
1493  * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
1494  */
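/* Minimal callback sketch, not part of the original source; the function name
 * is hypothetical:
 *
 *     static int drop_null_entries(zval *val)
 *     {
 *         if (Z_TYPE_P(val) == IS_NULL) {
 *             return ZEND_HASH_APPLY_REMOVE;   // delete and continue
 *         }
 *         return ZEND_HASH_APPLY_KEEP;         // keep and continue
 *     }
 *
 *     zend_hash_apply(ht, drop_null_entries);
 */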
1495 
1496 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1497 {
1498 	uint32_t idx;
1499 	Bucket *p;
1500 	int result;
1501 
1502 	IS_CONSISTENT(ht);
1503 
1504 	HASH_PROTECT_RECURSION(ht);
1505 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1506 		p = ht->arData + idx;
1507 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1508 		result = apply_func(&p->val);
1509 
1510 		if (result & ZEND_HASH_APPLY_REMOVE) {
1511 			HT_ASSERT_RC1(ht);
1512 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1513 		}
1514 		if (result & ZEND_HASH_APPLY_STOP) {
1515 			break;
1516 		}
1517 	}
1518 	HASH_UNPROTECT_RECURSION(ht);
1519 }
1520 
1521 
1522 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1523 {
1524     uint32_t idx;
1525 	Bucket *p;
1526 	int result;
1527 
1528 	IS_CONSISTENT(ht);
1529 
1530 	HASH_PROTECT_RECURSION(ht);
1531 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1532 		p = ht->arData + idx;
1533 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1534 		result = apply_func(&p->val, argument);
1535 
1536 		if (result & ZEND_HASH_APPLY_REMOVE) {
1537 			HT_ASSERT_RC1(ht);
1538 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1539 		}
1540 		if (result & ZEND_HASH_APPLY_STOP) {
1541 			break;
1542 		}
1543 	}
1544 	HASH_UNPROTECT_RECURSION(ht);
1545 }
1546 
1547 
1548 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1549 {
1550 	uint32_t idx;
1551 	Bucket *p;
1552 	va_list args;
1553 	zend_hash_key hash_key;
1554 	int result;
1555 
1556 	IS_CONSISTENT(ht);
1557 
1558 	HASH_PROTECT_RECURSION(ht);
1559 
1560 	for (idx = 0; idx < ht->nNumUsed; idx++) {
1561 		p = ht->arData + idx;
1562 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1563 		va_start(args, num_args);
1564 		hash_key.h = p->h;
1565 		hash_key.key = p->key;
1566 
1567 		result = apply_func(&p->val, num_args, args, &hash_key);
1568 
1569 		if (result & ZEND_HASH_APPLY_REMOVE) {
1570 			HT_ASSERT_RC1(ht);
1571 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1572 		}
1573 		if (result & ZEND_HASH_APPLY_STOP) {
1574 			va_end(args);
1575 			break;
1576 		}
1577 		va_end(args);
1578 	}
1579 
1580 	HASH_UNPROTECT_RECURSION(ht);
1581 }
1582 
1583 
1584 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1585 {
1586 	uint32_t idx;
1587 	Bucket *p;
1588 	int result;
1589 
1590 	IS_CONSISTENT(ht);
1591 
1592 	HASH_PROTECT_RECURSION(ht);
1593 	idx = ht->nNumUsed;
1594 	while (idx > 0) {
1595 		idx--;
1596 		p = ht->arData + idx;
1597 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1598 
1599 		result = apply_func(&p->val);
1600 
1601 		if (result & ZEND_HASH_APPLY_REMOVE) {
1602 			HT_ASSERT_RC1(ht);
1603 			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1604 		}
1605 		if (result & ZEND_HASH_APPLY_STOP) {
1606 			break;
1607 		}
1608 	}
1609 	HASH_UNPROTECT_RECURSION(ht);
1610 }
1611 
1612 
1613 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1614 {
1615     uint32_t idx;
1616 	Bucket *p;
1617 	zval *new_entry, *data;
1618 	zend_bool setTargetPointer;
1619 
1620 	IS_CONSISTENT(source);
1621 	IS_CONSISTENT(target);
1622 	HT_ASSERT_RC1(target);
1623 
1624 	setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
1625 	for (idx = 0; idx < source->nNumUsed; idx++) {
1626 		p = source->arData + idx;
1627 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1628 
1629 		if (setTargetPointer && source->nInternalPointer == idx) {
1630 			target->nInternalPointer = HT_INVALID_IDX;
1631 		}
1632 		/* INDIRECT element may point to UNDEF-ined slots */
1633 		data = &p->val;
1634 		if (Z_TYPE_P(data) == IS_INDIRECT) {
1635 			data = Z_INDIRECT_P(data);
1636 			if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1637 				continue;
1638 			}
1639 		}
1640 		if (p->key) {
1641 			new_entry = zend_hash_update(target, p->key, data);
1642 		} else {
1643 			new_entry = zend_hash_index_update(target, p->h, data);
1644 		}
1645 		if (pCopyConstructor) {
1646 			pCopyConstructor(new_entry);
1647 		}
1648 	}
1649 	if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
1650 		idx = 0;
1651 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1652 			idx++;
1653 		}
1654 		target->nInternalPointer = idx;
1655 	}
1656 }
1657 
1658 
1659 static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
1660 {
1661 	zval *data = &p->val;
1662 
1663 	if (with_holes) {
1664 		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1665 			data = Z_INDIRECT_P(data);
1666 		}
1667 		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1668 			return 0;
1669 		}
1670 	} else if (!packed) {
1671 		/* INDIRECT element may point to UNDEF-ined slots */
1672 		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1673 			data = Z_INDIRECT_P(data);
1674 			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1675 				return 0;
1676 			}
1677 		}
1678 	}
1679 
1680 	do {
1681 		if (Z_OPT_REFCOUNTED_P(data)) {
1682 			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
1683 			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
1684 			      Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
1685 				data = Z_REFVAL_P(data);
1686 				if (!Z_OPT_REFCOUNTED_P(data)) {
1687 					break;
1688 				}
1689 			}
1690 			Z_ADDREF_P(data);
1691 		}
1692 	} while (0);
1693 	ZVAL_COPY_VALUE(&q->val, data);
1694 
1695 	q->h = p->h;
1696 	if (packed) {
1697 		q->key = NULL;
1698 	} else {
1699 		uint32_t nIndex;
1700 
1701 		q->key = p->key;
1702 		if (!static_keys && q->key) {
1703 			zend_string_addref(q->key);
1704 		}
1705 
1706 		nIndex = q->h | target->nTableMask;
1707 		Z_NEXT(q->val) = HT_HASH(target, nIndex);
1708 		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
1709 	}
1710 	return 1;
1711 }
1712 
1713 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
1714 {
1715 	Bucket *p = source->arData;
1716 	Bucket *q = target->arData;
1717 	Bucket *end = p + source->nNumUsed;
1718 
1719 	do {
1720 		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
1721 			if (with_holes) {
1722 				ZVAL_UNDEF(&q->val);
1723 			}
1724 		}
1725 		p++; q++;
1726 	} while (p != end);
1727 }
1728 
1729 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
1730 {
1731 	uint32_t idx = 0;
1732 	Bucket *p = source->arData;
1733 	Bucket *q = target->arData;
1734 	Bucket *end = p + source->nNumUsed;
1735 
1736 	do {
1737 		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
1738 			uint32_t target_idx = idx;
1739 
1740 			idx++; p++;
1741 			while (p != end) {
1742 				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
1743 					if (source->nInternalPointer == idx) {
1744 						target->nInternalPointer = target_idx;
1745 					}
1746 					target_idx++; q++;
1747 				}
1748 				idx++; p++;
1749 			}
1750 			return target_idx;
1751 		}
1752 		idx++; p++; q++;
1753 	} while (p != end);
1754 	return idx;
1755 }
1756 
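/* Duplicates a PHP array, picking the cheapest strategy for the source:
 * empty tables get a fresh uninitialized bucket list, immutable arrays are
 * copied with a single memcpy, packed arrays keep their positional layout,
 * and regular hashes are rebuilt bucket by bucket, re-linking the hash chains
 * and compacting holes. In the element-wise paths, refcounted values are
 * add-ref'd rather than deep-copied, and singly-referenced references are
 * unwrapped. */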
1757 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
1758 {
1759 	uint32_t idx;
1760 	HashTable *target;
1761 
1762 	IS_CONSISTENT(source);
1763 
1764 	ALLOC_HASHTABLE(target);
1765 	GC_REFCOUNT(target) = 1;
1766 	GC_TYPE_INFO(target) = IS_ARRAY | (GC_COLLECTABLE << GC_FLAGS_SHIFT);
1767 
1768 	target->nTableSize = source->nTableSize;
1769 	target->pDestructor = ZVAL_PTR_DTOR;
1770 
1771 	if (source->nNumUsed == 0) {
1772 		target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
1773 		target->nTableMask = HT_MIN_MASK;
1774 		target->nNumUsed = 0;
1775 		target->nNumOfElements = 0;
1776 		target->nNextFreeElement = 0;
1777 		target->nInternalPointer = HT_INVALID_IDX;
1778 		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
1779 	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
1780 		target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
1781 		target->nTableMask = source->nTableMask;
1782 		target->nNumUsed = source->nNumUsed;
1783 		target->nNumOfElements = source->nNumOfElements;
1784 		target->nNextFreeElement = source->nNextFreeElement;
1785 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1786 		target->nInternalPointer = source->nInternalPointer;
1787 		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
1788 		if (target->nNumOfElements > 0 &&
1789 		    target->nInternalPointer == HT_INVALID_IDX) {
1790 			idx = 0;
1791 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1792 				idx++;
1793 			}
1794 			target->nInternalPointer = idx;
1795 		}
1796 	} else if (source->u.flags & HASH_FLAG_PACKED) {
1797 		target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
1798 		target->nTableMask = source->nTableMask;
1799 		target->nNumUsed = source->nNumUsed;
1800 		target->nNumOfElements = source->nNumOfElements;
1801 		target->nNextFreeElement = source->nNextFreeElement;
1802 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1803 		target->nInternalPointer = source->nInternalPointer;
1804 		HT_HASH_RESET_PACKED(target);
1805 
1806 		if (HT_IS_WITHOUT_HOLES(target)) {
1807 			zend_array_dup_packed_elements(source, target, 0);
1808 		} else {
1809 			zend_array_dup_packed_elements(source, target, 1);
1810 		}
1811 		if (target->nNumOfElements > 0 &&
1812 		    target->nInternalPointer == HT_INVALID_IDX) {
1813 			idx = 0;
1814 			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1815 				idx++;
1816 			}
1817 			target->nInternalPointer = idx;
1818 		}
1819 	} else {
1820 		target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
1821 		target->nTableMask = source->nTableMask;
1822 		target->nNextFreeElement = source->nNextFreeElement;
1823 		target->nInternalPointer = source->nInternalPointer;
1824 
1825 		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1826 		HT_HASH_RESET(target);
1827 
1828 		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
1829 			if (HT_IS_WITHOUT_HOLES(source)) {
1830 				idx = zend_array_dup_elements(source, target, 1, 0);
1831 			} else {
1832 				idx = zend_array_dup_elements(source, target, 1, 1);
1833 			}
1834 		} else {
1835 			if (HT_IS_WITHOUT_HOLES(source)) {
1836 				idx = zend_array_dup_elements(source, target, 0, 0);
1837 			} else {
1838 				idx = zend_array_dup_elements(source, target, 0, 1);
1839 			}
1840 		}
1841 		target->nNumUsed = idx;
1842 		target->nNumOfElements = idx;
1843 		if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
1844 			target->nInternalPointer = 0;
1845 		}
1846 	}
1847 	return target;
1848 }
1849 
1850 
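/* Merges source into target. With overwrite set, every live source element
 * replaces the corresponding target entry (HASH_UPDATE); otherwise only keys
 * missing from target are added (HASH_ADD). HASH_UPDATE_INDIRECT makes the
 * insertion follow IS_INDIRECT slots in target, and pCopyConstructor, if
 * given, runs on each value actually written. */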
1851 ZEND_API void ZEND_FASTCALL _zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite ZEND_FILE_LINE_DC)
1852 {
1853 	uint32_t idx;
1854 	Bucket *p;
1855 	zval *t;
1856 
1857 	IS_CONSISTENT(source);
1858 	IS_CONSISTENT(target);
1859 	HT_ASSERT_RC1(target);
1860 
1861 	if (overwrite) {
1862 		for (idx = 0; idx < source->nNumUsed; idx++) {
1863 			p = source->arData + idx;
1864 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1865 			if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1866 			    UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1867 			    continue;
1868 			}
1869 			if (p->key) {
1870 				t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1871 				if (t && pCopyConstructor) {
1872 					pCopyConstructor(t);
1873 				}
1874 			} else {
1875 				t = zend_hash_index_update(target, p->h, &p->val);
1876 				if (t && pCopyConstructor) {
1877 					pCopyConstructor(t);
1878 				}
1879 			}
1880 		}
1881 	} else {
1882 		for (idx = 0; idx < source->nNumUsed; idx++) {
1883 			p = source->arData + idx;
1884 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1885 			if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1886 			    UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1887 			    continue;
1888 			}
1889 			if (p->key) {
1890 				t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_ADD | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1891 				if (t && pCopyConstructor) {
1892 					pCopyConstructor(t);
1893 				}
1894 			} else {
1895 				t = zend_hash_index_add(target, p->h, &p->val);
1896 				if (t && pCopyConstructor) {
1897 					pCopyConstructor(t);
1898 				}
1899 			}
1900 		}
1901 	}
1902 	if (target->nNumOfElements > 0) {
1903 		idx = 0;
1904 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1905 			idx++;
1906 		}
1907 		target->nInternalPointer = idx;
1908 	}
1909 }
1910 
1911 
1912 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
1913 {
1914 	zend_hash_key hash_key;
1915 
1916 	hash_key.h = p->h;
1917 	hash_key.key = p->key;
1918 	return merge_checker_func(target, source_data, &hash_key, pParam);
1919 }
1920 
1921 
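/* Selective merge: pMergeSource is consulted for every live source element
 * (via the wrapper above, which packages the bucket's key into a zend_hash_key),
 * and only accepted elements are copied into target with update semantics.
 * The copy uses the bucket's string key, so this path assumes a string-keyed
 * source table. */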
1922 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
1923 {
1924 	uint32_t idx;
1925 	Bucket *p;
1926 	zval *t;
1927 
1928 	IS_CONSISTENT(source);
1929 	IS_CONSISTENT(target);
1930 	HT_ASSERT_RC1(target);
1931 
1932 	for (idx = 0; idx < source->nNumUsed; idx++) {
1933 		p = source->arData + idx;
1934 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1935 		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
1936 			t = zend_hash_update(target, p->key, &p->val);
1937 			if (t && pCopyConstructor) {
1938 				pCopyConstructor(t);
1939 			}
1940 		}
1941 	}
1942 	if (target->nNumOfElements > 0) {
1943 		idx = 0;
1944 		while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1945 			idx++;
1946 		}
1947 		target->nInternalPointer = idx;
1948 	}
1949 }
1950 
1951 
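/* Lookup helpers. The *_find() variants return a pointer to the stored zval
 * (NULL when the key is absent), the *_exists() variants only report presence,
 * and the *_index_*() variants take an integer key, with a direct array access
 * fast path for packed tables. A rough extension-side usage sketch:
 *
 *     zval *val = zend_hash_str_find(ht, "name", sizeof("name") - 1);
 *     if (val != NULL) {
 *         // key "name" exists; val points at the stored value
 *     }
 */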
1952 /* Returns the hash table data if found and NULL if not. */
1953 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
1954 {
1955 	Bucket *p;
1956 
1957 	IS_CONSISTENT(ht);
1958 
1959 	p = zend_hash_find_bucket(ht, key);
1960 	return p ? &p->val : NULL;
1961 }
1962 
1963 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
1964 {
1965 	zend_ulong h;
1966 	Bucket *p;
1967 
1968 	IS_CONSISTENT(ht);
1969 
1970 	h = zend_inline_hash_func(str, len);
1971 	p = zend_hash_str_find_bucket(ht, str, len, h);
1972 	return p ? &p->val : NULL;
1973 }
1974 
1975 ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
1976 {
1977 	Bucket *p;
1978 
1979 	IS_CONSISTENT(ht);
1980 
1981 	p = zend_hash_find_bucket(ht, key);
1982 	return p ? 1 : 0;
1983 }
1984 
1985 ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
1986 {
1987 	zend_ulong h;
1988 	Bucket *p;
1989 
1990 	IS_CONSISTENT(ht);
1991 
1992 	h = zend_inline_hash_func(str, len);
1993 	p = zend_hash_str_find_bucket(ht, str, len, h);
1994 	return p ? 1 : 0;
1995 }
1996 
1997 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
1998 {
1999 	Bucket *p;
2000 
2001 	IS_CONSISTENT(ht);
2002 
2003 	if (ht->u.flags & HASH_FLAG_PACKED) {
2004 		if (h < ht->nNumUsed) {
2005 			p = ht->arData + h;
2006 			if (Z_TYPE(p->val) != IS_UNDEF) {
2007 				return &p->val;
2008 			}
2009 		}
2010 		return NULL;
2011 	}
2012 
2013 	p = zend_hash_index_find_bucket(ht, h);
2014 	return p ? &p->val : NULL;
2015 }
2016 
2017 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
2018 {
2019 	Bucket *p;
2020 
2021 	IS_CONSISTENT(ht);
2022 
2023 	p = zend_hash_index_find_bucket(ht, h);
2024 	return p ? &p->val : NULL;
2025 }
2026 
2027 ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
2028 {
2029 	Bucket *p;
2030 
2031 	IS_CONSISTENT(ht);
2032 
2033 	if (ht->u.flags & HASH_FLAG_PACKED) {
2034 		if (h < ht->nNumUsed) {
2035 			if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
2036 				return 1;
2037 			}
2038 		}
2039 		return 0;
2040 	}
2041 
2042 	p = zend_hash_index_find_bucket(ht, h);
2043 	return p ? 1 : 0;
2044 }
2045 
2046 
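/* Positions *pos on the first live bucket of the table, or HT_INVALID_IDX when
 * the table is empty. The HT_ASSERT accepts any external position, but only
 * allows moving the table's own internal pointer while the table is not shared
 * (refcount 1). */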
2047 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2048 {
2049 	uint32_t idx;
2050 
2051 	IS_CONSISTENT(ht);
2052 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2053 
2054 	for (idx = 0; idx < ht->nNumUsed; idx++) {
2055 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2056 			*pos = idx;
2057 			return;
2058 		}
2059 	}
2060 	*pos = HT_INVALID_IDX;
2061 }
2062 
2063 
2064 /* This function could be sped up considerably by remembering
2065  * the end of the list.
2066  */
2067 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2068 {
2069 	uint32_t idx;
2070 
2071 	IS_CONSISTENT(ht);
2072 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2073 
2074 	idx = ht->nNumUsed;
2075 	while (idx > 0) {
2076 		idx--;
2077 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2078 			*pos = idx;
2079 			return;
2080 		}
2081 	}
2082 	*pos = HT_INVALID_IDX;
2083 }
2084 
2085 
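/* Together with the reset/end helpers above, the move functions drive manual
 * iteration over an external HashPosition. A rough usage sketch:
 *
 *     HashPosition pos;
 *     zval *val;
 *
 *     zend_hash_internal_pointer_reset_ex(ht, &pos);
 *     while ((val = zend_hash_get_current_data_ex(ht, &pos)) != NULL) {
 *         // ... use val ...
 *         zend_hash_move_forward_ex(ht, &pos);
 *     }
 */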
2086 ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2087 {
2088 	uint32_t idx = *pos;
2089 
2090 	IS_CONSISTENT(ht);
2091 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2092 
2093 	if (idx != HT_INVALID_IDX) {
2094 		while (1) {
2095 			idx++;
2096 			if (idx >= ht->nNumUsed) {
2097 				*pos = HT_INVALID_IDX;
2098 				return SUCCESS;
2099 			}
2100 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2101 				*pos = idx;
2102 				return SUCCESS;
2103 			}
2104 		}
2105 	} else {
2106  		return FAILURE;
2107 	}
2108 }
2109 
2110 ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2111 {
2112 	uint32_t idx = *pos;
2113 
2114 	IS_CONSISTENT(ht);
2115 	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2116 
2117 	if (idx != HT_INVALID_IDX) {
2118 		while (idx > 0) {
2119 			idx--;
2120 			if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2121 				*pos = idx;
2122 				return SUCCESS;
2123 			}
2124 		}
2125 		*pos = HT_INVALID_IDX;
2126  		return SUCCESS;
2127 	} else {
2128  		return FAILURE;
2129 	}
2130 }
2131 
2132 
2133 /* This function should be made binary safe */
2134 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2135 {
2136 	uint32_t idx = *pos;
2137 	Bucket *p;
2138 
2139 	IS_CONSISTENT(ht);
2140 	if (idx != HT_INVALID_IDX) {
2141 		p = ht->arData + idx;
2142 		if (p->key) {
2143 			*str_index = p->key;
2144 			return HASH_KEY_IS_STRING;
2145 		} else {
2146 			*num_index = p->h;
2147 			return HASH_KEY_IS_LONG;
2148 		}
2149 	}
2150 	return HASH_KEY_NON_EXISTENT;
2151 }
2152 
2153 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2154 {
2155 	uint32_t idx = *pos;
2156 	Bucket *p;
2157 
2158 	IS_CONSISTENT(ht);
2159 	if (idx == HT_INVALID_IDX) {
2160 		ZVAL_NULL(key);
2161 	} else {
2162 		p = ht->arData + idx;
2163 		if (p->key) {
2164 			ZVAL_STR_COPY(key, p->key);
2165 		} else {
2166 			ZVAL_LONG(key, p->h);
2167 		}
2168 	}
2169 }
2170 
2171 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2172 {
2173 	uint32_t idx = *pos;
2174 	Bucket *p;
2175 
2176 	IS_CONSISTENT(ht);
2177 	if (idx != HT_INVALID_IDX) {
2178 		p = ht->arData + idx;
2179 		if (p->key) {
2180 			return HASH_KEY_IS_STRING;
2181 		} else {
2182 			return HASH_KEY_IS_LONG;
2183 		}
2184 	}
2185 	return HASH_KEY_NON_EXISTENT;
2186 }
2187 
2188 
2189 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2190 {
2191 	uint32_t idx = *pos;
2192 	Bucket *p;
2193 
2194 	IS_CONSISTENT(ht);
2195 	if (idx != HT_INVALID_IDX) {
2196 		p = ht->arData + idx;
2197 		return &p->val;
2198 	} else {
2199 		return NULL;
2200 	}
2201 }
2202 
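/* Bucket swap helpers handed to the sort callback below: the generic variant
 * exchanges value, hash and key; the renumber variant swaps only the values,
 * since hashes and keys are rewritten afterwards; the packed variant skips the
 * key, which packed buckets do not use. */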
2203 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2204 {
2205 	zval val;
2206 	zend_ulong h;
2207 	zend_string *key;
2208 
2209 	ZVAL_COPY_VALUE(&val, &p->val);
2210 	h = p->h;
2211 	key = p->key;
2212 
2213 	ZVAL_COPY_VALUE(&p->val, &q->val);
2214 	p->h = q->h;
2215 	p->key = q->key;
2216 
2217 	ZVAL_COPY_VALUE(&q->val, &val);
2218 	q->h = h;
2219 	q->key = key;
2220 }
2221 
2222 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2223 {
2224 	zval val;
2225 
2226 	ZVAL_COPY_VALUE(&val, &p->val);
2227 	ZVAL_COPY_VALUE(&p->val, &q->val);
2228 	ZVAL_COPY_VALUE(&q->val, &val);
2229 }
2230 
2231 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2232 {
2233 	zval val;
2234 	zend_ulong h;
2235 
2236 	ZVAL_COPY_VALUE(&val, &p->val);
2237 	h = p->h;
2238 
2239 	ZVAL_COPY_VALUE(&p->val, &q->val);
2240 	p->h = q->h;
2241 
2242 	ZVAL_COPY_VALUE(&q->val, &val);
2243 	q->h = h;
2244 }
2245 
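/* Sorts the bucket array in place: holes are compacted first, then the
 * supplied sort function orders the buckets using compar and one of the swap
 * helpers above. With renumber set, string keys are released and the table
 * becomes a packed array indexed 0..n-1; otherwise the hash part is rebuilt
 * (or a packed table converted to a real hash) to match the new order. */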
2246 ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
2247 {
2248 	Bucket *p;
2249 	uint32_t i, j;
2250 
2251 	IS_CONSISTENT(ht);
2252 	HT_ASSERT_RC1(ht);
2253 
2254 	if (!(ht->nNumOfElements > 1) && !(renumber && ht->nNumOfElements > 0)) { /* Doesn't require sorting */
2255 		return SUCCESS;
2256 	}
2257 
2258 	if (HT_IS_WITHOUT_HOLES(ht)) {
2259 		i = ht->nNumUsed;
2260 	} else {
2261 		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2262 			p = ht->arData + j;
2263 			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2264 			if (i != j) {
2265 				ht->arData[i] = *p;
2266 			}
2267 			i++;
2268 		}
2269 	}
2270 
2271 	sort((void *)ht->arData, i, sizeof(Bucket), compar,
2272 			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2273 				((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2274 
2275 	ht->nNumUsed = i;
2276 	ht->nInternalPointer = 0;
2277 
2278 	if (renumber) {
2279 		for (j = 0; j < i; j++) {
2280 			p = ht->arData + j;
2281 			p->h = j;
2282 			if (p->key) {
2283 				zend_string_release(p->key);
2284 				p->key = NULL;
2285 			}
2286 		}
2287 
2288 		ht->nNextFreeElement = i;
2289 	}
2290 	if (ht->u.flags & HASH_FLAG_PACKED) {
2291 		if (!renumber) {
2292 			zend_hash_packed_to_hash(ht);
2293 		}
2294 	} else {
2295 		if (renumber) {
2296 			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2297 			Bucket *old_buckets = ht->arData;
2298 
2299 			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht->u.flags & HASH_FLAG_PERSISTENT));
2300 			ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2301 			ht->nTableMask = HT_MIN_MASK;
2302 			HT_SET_DATA_ADDR(ht, new_data);
2303 			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2304 			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
2305 			HT_HASH_RESET_PACKED(ht);
2306 		} else {
2307 			zend_hash_rehash(ht);
2308 		}
2309 	}
2310 
2311 	return SUCCESS;
2312 }
2313 
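/* Compares two tables. A difference in element count decides immediately.
 * In ordered mode, elements must match pairwise in iteration order (keys
 * first, then values via compar); in unordered mode, each key of ht1 is
 * looked up in ht2. Returns a negative, zero or positive result in the usual
 * comparator convention. */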
2314 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
2315 	uint32_t idx1, idx2;
2316 
2317 	if (ht1->nNumOfElements != ht2->nNumOfElements) {
2318 		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2319 	}
2320 
2321 	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2322 		Bucket *p1 = ht1->arData + idx1, *p2;
2323 		zval *pData1, *pData2;
2324 		int result;
2325 
2326 		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2327 		if (ordered) {
2328 			while (1) {
2329 				ZEND_ASSERT(idx2 != ht2->nNumUsed);
2330 				p2 = ht2->arData + idx2;
2331 				if (Z_TYPE(p2->val) != IS_UNDEF) break;
2332 				idx2++;
2333 			}
2334 			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
2335 				if (p1->h != p2->h) {
2336 					return p1->h > p2->h ? 1 : -1;
2337 				}
2338 			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
2339 				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2340 					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2341 				}
2342 
2343 				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2344 				if (result != 0) {
2345 					return result;
2346 				}
2347 			} else {
2348 				/* Mixed key types: A string key is considered as larger */
2349 				return p1->key != NULL ? 1 : -1;
2350 			}
2351 			pData2 = &p2->val;
2352 			idx2++;
2353 		} else {
2354 			if (p1->key == NULL) { /* numeric index */
2355 				pData2 = zend_hash_index_find(ht2, p1->h);
2356 				if (pData2 == NULL) {
2357 					return 1;
2358 				}
2359 			} else { /* string index */
2360 				pData2 = zend_hash_find(ht2, p1->key);
2361 				if (pData2 == NULL) {
2362 					return 1;
2363 				}
2364 			}
2365 		}
2366 
2367 		pData1 = &p1->val;
2368 		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2369 			pData1 = Z_INDIRECT_P(pData1);
2370 		}
2371 		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2372 			pData2 = Z_INDIRECT_P(pData2);
2373 		}
2374 
2375 		if (Z_TYPE_P(pData1) == IS_UNDEF) {
2376 			if (Z_TYPE_P(pData2) != IS_UNDEF) {
2377 				return -1;
2378 			}
2379 		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2380 			return 1;
2381 		} else {
2382 			result = compar(pData1, pData2);
2383 			if (result != 0) {
2384 				return result;
2385 			}
2386 		}
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
2393 {
2394 	int result;
2395 	IS_CONSISTENT(ht1);
2396 	IS_CONSISTENT(ht2);
2397 
2398 	HASH_PROTECT_RECURSION(ht1);
2399 	HASH_PROTECT_RECURSION(ht2);
2400 	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2401 	HASH_UNPROTECT_RECURSION(ht1);
2402 	HASH_UNPROTECT_RECURSION(ht2);
2403 
2404 	return result;
2405 }
2406 
2407 
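/* Returns a pointer to the value of the extreme bucket according to compar:
 * the maximum when flag is non-zero, the minimum otherwise; NULL for an empty
 * table. Note that compar is invoked on Bucket pointers here, not on zvals. */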
2408 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
2409 {
2410 	uint32_t idx;
2411 	Bucket *p, *res;
2412 
2413 	IS_CONSISTENT(ht);
2414 
2415 	if (ht->nNumOfElements == 0 ) {
2416 		return NULL;
2417 	}
2418 
2419 	idx = 0;
2420 	while (1) {
2421 		if (idx == ht->nNumUsed) {
2422 			return NULL;
2423 		}
2424 		if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2425 		idx++;
2426 	}
2427 	res = ht->arData + idx;
2428 	for (; idx < ht->nNumUsed; idx++) {
2429 		p = ht->arData + idx;
2430 		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2431 
2432 		if (flag) {
2433 			if (compar(res, p) < 0) { /* max */
2434 				res = p;
2435 			}
2436 		} else {
2437 			if (compar(res, p) > 0) { /* min */
2438 				res = p;
2439 			}
2440 		}
2441 	}
2442 	return &res->val;
2443 }
2444 
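/* Parses a decimal string key into an integer index. Succeeds (returns 1 and
 * stores the value in *idx) only for canonical integers: an optional leading
 * '-', no leading zeros, no overlong input, no overflow past the zend_long
 * range. This check is what lets keys such as "123" and 123 address the same
 * element, while "0123" stays a string key. */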
2445 ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2446 {
2447 	register const char *tmp = key;
2448 
2449 	const char *end = key + length;
2450 
2451 	if (*tmp == '-') {
2452 		tmp++;
2453 	}
2454 
2455 	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
2456 	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
2457 	 || (SIZEOF_ZEND_LONG == 4 &&
2458 	     end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2459 	     *tmp > '2')) { /* overflow */
2460 		return 0;
2461 	}
2462 	*idx = (*tmp - '0');
2463 	while (1) {
2464 		++tmp;
2465 		if (tmp == end) {
2466 			if (*key == '-') {
2467 				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
2468 					return 0;
2469 				}
2470 				*idx = 0 - *idx;
2471 			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
2472 				return 0;
2473 			}
2474 			return 1;
2475 		}
2476 		if (*tmp <= '9' && *tmp >= '0') {
2477 			*idx = (*idx * 10) + (*tmp - '0');
2478 		} else {
2479 			return 0;
2480 		}
2481 	}
2482 }
2483 
2484 /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
2485  * and converts it to a "proptable" (contains only string keys).
2486  * If the symtable didn't need duplicating, its refcount is incremented.
2487  */
2488 ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
2489 {
2490 	zend_ulong num_key;
2491 	zend_string *str_key;
2492 	zval *zv;
2493 
2494 	if (UNEXPECTED(HT_IS_PACKED(ht))) {
2495 		goto convert;
2496 	}
2497 
2498 	ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2499 		if (!str_key) {
2500 			goto convert;
2501 		}
2502 	} ZEND_HASH_FOREACH_END();
2503 
2504 	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
2505 		GC_REFCOUNT(ht)++;
2506 	}
2507 
2508 	return ht;
2509 
2510 convert:
2511 	{
2512 		HashTable *new_ht = emalloc(sizeof(HashTable));
2513 
2514 		zend_hash_init(new_ht, zend_hash_num_elements(ht), NULL, ZVAL_PTR_DTOR, 0);
2515 
2516 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2517 			if (!str_key) {
2518 				str_key = zend_long_to_str(num_key);
2519 				zend_string_delref(str_key);
2520 			}
2521 			do {
2522 				if (Z_OPT_REFCOUNTED_P(zv)) {
2523 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2524 						zv = Z_REFVAL_P(zv);
2525 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2526 							break;
2527 						}
2528 					}
2529 					Z_ADDREF_P(zv);
2530 				}
2531 			} while (0);
2532 			zend_hash_update(new_ht, str_key, zv);
2533 		} ZEND_HASH_FOREACH_END();
2534 
2535 		return new_ht;
2536 	}
2537 }
2538 
2539 /* Takes a "proptable" hashtable (contains only string keys) and converts it to
2540  * a "symtable" (contains integer and non-numeric string keys).
2541  * If the proptable didn't need duplicating, its refcount is incremented.
2542  */
2543 ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, zend_bool always_duplicate)
2544 {
2545 	zend_ulong num_key;
2546 	zend_string *str_key;
2547 	zval *zv;
2548 
2549 	ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2550 		/* The `str_key &&` here might seem redundant: property tables should
2551 		 * only have string keys. Unfortunately, this isn't true, at the very
2552 		 * least because of ArrayObject, which stores a symtable where the
2553 		 * property table should be.
2554 		 */
2555 		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2556 			goto convert;
2557 		}
2558 	} ZEND_HASH_FOREACH_END();
2559 
2560 	if (always_duplicate) {
2561 		return zend_array_dup(ht);
2562 	}
2563 
2564 	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
2565 		GC_REFCOUNT(ht)++;
2566 	}
2567 
2568 	return ht;
2569 
2570 convert:
2571 	{
2572 		HashTable *new_ht = emalloc(sizeof(HashTable));
2573 
2574 		zend_hash_init(new_ht, zend_hash_num_elements(ht), NULL, ZVAL_PTR_DTOR, 0);
2575 
2576 		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
2577 			do {
2578 				if (Z_OPT_REFCOUNTED_P(zv)) {
2579 					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
2580 						zv = Z_REFVAL_P(zv);
2581 						if (!Z_OPT_REFCOUNTED_P(zv)) {
2582 							break;
2583 						}
2584 					}
2585 					Z_ADDREF_P(zv);
2586 				}
2587 			} while (0);
2588 			/* Again, thank ArrayObject for `!str_key ||`. */
2589 			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
2590 				zend_hash_index_update(new_ht, num_key, zv);
2591 			} else {
2592 				zend_hash_update(new_ht, str_key, zv);
2593 			}
2594 		} ZEND_HASH_FOREACH_END();
2595 
2596 		return new_ht;
2597 	}
2598 }
2599 
2600 /*
2601  * Local variables:
2602  * tab-width: 4
2603  * c-basic-offset: 4
2604  * indent-tabs-mode: t
2605  * End:
2606  * vim600: sw=4 ts=4 fdm=marker
2607  * vim<600: sw=4 ts=4
2608  */
2609