xref: /PHP-8.3/Zend/zend_alloc.c (revision bc57c77f)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Dmitry Stogov <dmitry@php.net>                              |
18    +----------------------------------------------------------------------+
19 */
20 
21 /*
22  * zend_alloc is designed to be a modern CPU cache friendly memory manager
23  * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
24  *
25  * All allocations are split into 3 categories:
26  *
27  * Huge  - the size is greater than the CHUNK size (~2M by default). The
28  *         allocation is performed using mmap(); the result is aligned on a 2M boundary.
29  *
30  * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
31  *         are always aligned on a page boundary.
32  *
33  * Small - less than 3/4 of the page size. Small sizes are rounded up to the
34  *         nearest greater predefined small size (there are 30 predefined sizes:
35  *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
36  *         RUNs. Each RUN is allocated as a single page or a few consecutive pages.
37  *         Allocation inside RUNs is implemented using a linked list of free
38  *         elements. The result is aligned to 8 bytes.
39  *
40  * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge
41  * memory blocks are always aligned to a CHUNK boundary, so it is easy to
42  * determine the CHUNK owning a given pointer. Regular CHUNKs reserve a
43  * single page at the start for bookkeeping. It contains a bitset of free
44  * pages, a few bitsets for available runs of predefined small sizes, a map
45  * that keeps usage information for each page in this CHUNK, etc.
46  *
47  * zend_alloc provides the familiar emalloc()/efree()/erealloc() API, but in
48  * addition it provides specialized and optimized routines for blocks of
49  * predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.).
50  * The library uses C preprocessor tricks that substitute calls to emalloc()
51  * with more specialized routines when the requested size is known.
52  */
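/*
 * Illustrative note (not part of the original comment): because CHUNKs and
 * huge blocks are 2M-aligned, the owner of a pointer can be recovered with
 * plain mask arithmetic. A minimal sketch, using the macros defined below:
 *
 *   zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
 *   size_t page_offset   = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
 *   int    page_num      = (int)(page_offset / ZEND_MM_PAGE_SIZE);
 *
 * A page_offset of 0 means the pointer is not inside a regular chunk: it is
 * either a huge block or the chunk itself (see zend_mm_free_heap() below).
 */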
53 
54 #include "zend.h"
55 #include "zend_alloc.h"
56 #include "zend_globals.h"
57 #include "zend_operators.h"
58 #include "zend_multiply.h"
59 #include "zend_bitset.h"
60 #include "zend_mmap.h"
61 #include <signal.h>
62 
63 #ifdef HAVE_UNISTD_H
64 # include <unistd.h>
65 #endif
66 
67 #ifdef ZEND_WIN32
68 # include <wincrypt.h>
69 # include <process.h>
70 # include "win32/winutil.h"
71 #endif
72 
73 #include <stdio.h>
74 #include <stdlib.h>
75 #include <string.h>
76 
77 #include <sys/types.h>
78 #include <sys/stat.h>
79 #include <limits.h>
80 #include <fcntl.h>
81 #include <errno.h>
82 
83 #ifndef _WIN32
84 # include <sys/mman.h>
85 # ifndef MAP_ANON
86 #  ifdef MAP_ANONYMOUS
87 #   define MAP_ANON MAP_ANONYMOUS
88 #  endif
89 # endif
90 # ifndef MAP_FAILED
91 #  define MAP_FAILED ((void*)-1)
92 # endif
93 # ifndef MAP_POPULATE
94 #  define MAP_POPULATE 0
95 # endif
96 #  if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
97 #    define REAL_PAGE_SIZE _real_page_size
98 static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
99 #  endif
100 # ifdef MAP_ALIGNED_SUPER
101 #    define MAP_HUGETLB MAP_ALIGNED_SUPER
102 # endif
103 #endif
104 
105 #ifndef REAL_PAGE_SIZE
106 # define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
107 #endif
108 
109 /* NetBSD has an mremap() function whose signature is incompatible with the
110  * Linux one, so pretend it doesn't exist. */
111 #ifndef __linux__
112 # undef HAVE_MREMAP
113 #endif
114 
115 #ifndef __APPLE__
116 # define ZEND_MM_FD -1
117 #else
118 # include <mach/vm_statistics.h>
119 /* macOS allows tracking anonymous pages via vmmap, per TAG id.
120  * Userland applications are allowed to use tags 240 through 255.
121  */
122 # define ZEND_MM_FD VM_MAKE_TAG(250U)
123 #endif
124 
125 #ifndef ZEND_MM_STAT
126 # define ZEND_MM_STAT 1    /* track current and peak memory usage            */
127 #endif
128 #ifndef ZEND_MM_LIMIT
129 # define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
130 #endif
131 #ifndef ZEND_MM_CUSTOM
132 # define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
133                            /* USE_ZEND_ALLOC=0 may switch to system malloc() */
134 #endif
135 #ifndef ZEND_MM_STORAGE
136 # define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
137 #endif
138 #ifndef ZEND_MM_ERROR
139 # define ZEND_MM_ERROR 1   /* report system errors                           */
140 #endif
141 
142 #ifndef ZEND_MM_CHECK
143 # define ZEND_MM_CHECK(condition, message)  do { \
144 		if (UNEXPECTED(!(condition))) { \
145 			zend_mm_panic(message); \
146 		} \
147 	} while (0)
148 #endif
149 
150 typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
151 typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
152 
153 #define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
154 	(((size_t)(size)) & ((alignment) - 1))
155 #define ZEND_MM_ALIGNED_BASE(size, alignment) \
156 	(((size_t)(size)) & ~((alignment) - 1))
157 #define ZEND_MM_SIZE_TO_NUM(size, alignment) \
158 	(((size_t)(size) + ((alignment) - 1)) / (alignment))
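/*
 * Worked example: with alignment = 4096 (a power of two, as required) and
 * size = 0x2A10 (10768):
 *   ZEND_MM_ALIGNED_OFFSET(0x2A10, 4096) == 0xA10   -- offset within the page
 *   ZEND_MM_ALIGNED_BASE(0x2A10, 4096)   == 0x2000  -- rounded down to the page
 *   ZEND_MM_SIZE_TO_NUM(0x2A10, 4096)    == 3       -- pages needed, rounded up
 */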
159 
160 #define ZEND_MM_BITSET_LEN		(sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
161 #define ZEND_MM_PAGE_MAP_LEN	(ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
162 
163 typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
164 
165 #define ZEND_MM_IS_FRUN                  0x00000000
166 #define ZEND_MM_IS_LRUN                  0x40000000
167 #define ZEND_MM_IS_SRUN                  0x80000000
168 
169 #define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
170 #define ZEND_MM_LRUN_PAGES_OFFSET        0
171 
172 #define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
173 #define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
174 
175 #define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
176 #define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
177 
178 #define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
179 #define ZEND_MM_NRUN_OFFSET_OFFSET       16
180 
181 #define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
182 #define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
183 #define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
184 #define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
185 
186 #define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
187 #define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
188 #define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
189 #define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
190 #define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
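/*
 * Illustrative decoding of a map[] entry (hypothetical values): the run kind
 * lives in the top two bits, the payload in the low bits. For a small run of
 * bin 5 with 12 free elements:
 *
 *   zend_mm_page_info info = ZEND_MM_SRUN_EX(5, 12);   -- 0x800C0005
 *   ZEND_MM_SRUN_BIN_NUM(info)      == 5
 *   ZEND_MM_SRUN_FREE_COUNTER(info) == 12
 *
 * ZEND_MM_NRUN() marks the non-first pages of a multi-page small run: it sets
 * both the SRUN and LRUN bits and stores the page offset back to the first
 * page of the run in the same 9-bit field.
 */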
191 
192 #define ZEND_MM_BINS 30
193 
194 typedef struct  _zend_mm_page      zend_mm_page;
195 typedef struct  _zend_mm_bin       zend_mm_bin;
196 typedef struct  _zend_mm_free_slot zend_mm_free_slot;
197 typedef struct  _zend_mm_chunk     zend_mm_chunk;
198 typedef struct  _zend_mm_huge_list zend_mm_huge_list;
199 
200 static bool zend_mm_use_huge_pages = false;
201 
202 /*
203  * Memory is retrieved from the OS in chunks of fixed size (2 MB).
204  * Inside a chunk, memory is managed in pages of fixed size (4096 bytes),
205  * so each chunk consists of 512 pages.
206  * The first page of each chunk is reserved for the chunk header.
207  * It contains service information about all pages.
208  *
209  * free_pages - current number of free pages in this chunk
210  *
211  * free_tail  - number of continuous free pages at the end of the chunk
212  *
213  * free_map   - bitset (a bit for each page). The bit is set if the corresponding
214  *              page is allocated. The allocator for "large" sizes may easily find
215  *              a free page (or a continuous run of pages) by searching for zero
216  *              bits.
217  *
218  * map        - contains service information for each page (32 bits per
219  *              page).
220  *    usage:
221  *              (2 bits)
222  *              FRUN - free page
223  *              LRUN - first page of a "large" allocation
224  *              SRUN - first page of a bin used for "small" allocations
225  *
226  *    lrun_pages:
227  *              (10 bits) number of allocated pages
228  *
229  *    srun_bin_num:
230  *              (5 bits) bin number (e.g. 0 for sizes 0-8, 1 for 9-16,
231  *               2 for 17-24, etc.); see zend_alloc_sizes.h
232  */
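/*
 * Layout arithmetic, for reference: a chunk is 2 MB = 512 pages of 4096
 * bytes. Page 0 holds the chunk header (struct _zend_mm_chunk below):
 * free_map is 512 bits = 64 bytes and map[] is 512 * 4 = 2048 bytes, so the
 * header fits in the single reserved page and 511 pages remain for data.
 */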
233 
234 struct _zend_mm_heap {
235 #if ZEND_MM_CUSTOM
236 	int                use_custom_heap;
237 #endif
238 #if ZEND_MM_STORAGE
239 	zend_mm_storage   *storage;
240 #endif
241 #if ZEND_MM_STAT
242 	size_t             size;                    /* current memory usage */
243 	size_t             peak;                    /* peak memory usage */
244 #endif
245 	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
246 #if ZEND_MM_STAT || ZEND_MM_LIMIT
247 	size_t             real_size;               /* current size of allocated pages */
248 #endif
249 #if ZEND_MM_STAT
250 	size_t             real_peak;               /* peak size of allocated pages */
251 #endif
252 #if ZEND_MM_LIMIT
253 	size_t             limit;                   /* memory limit */
254 	int                overflow;                /* memory overflow flag */
255 #endif
256 
257 	zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
258 
259 	zend_mm_chunk     *main_chunk;
260 	zend_mm_chunk     *cached_chunks;			/* list of unused chunks */
261 	int                chunks_count;			/* number of allocated chunks */
262 	int                peak_chunks_count;		/* peak number of allocated chunks for current request */
263 	int                cached_chunks_count;		/* number of cached chunks */
264 	double             avg_chunks_count;		/* average number of chunks allocated per request */
265 	int                last_chunks_delete_boundary; /* number of chunks after last deletion */
266 	int                last_chunks_delete_count;    /* number of deletions at the last boundary */
267 #if ZEND_MM_CUSTOM
268 	union {
269 		struct {
270 			void      *(*_malloc)(size_t);
271 			void       (*_free)(void*);
272 			void      *(*_realloc)(void*, size_t);
273 		} std;
274 		struct {
275 			void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
276 			void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
277 			void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
278 		} debug;
279 	} custom_heap;
280 	HashTable *tracked_allocs;
281 #endif
282 };
283 
284 struct _zend_mm_chunk {
285 	zend_mm_heap      *heap;
286 	zend_mm_chunk     *next;
287 	zend_mm_chunk     *prev;
288 	uint32_t           free_pages;				/* number of free pages */
289 	uint32_t           free_tail;               /* number of free pages at the end of chunk */
290 	uint32_t           num;
291 	char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
292 	zend_mm_heap       heap_slot;               /* used only in main chunk */
293 	zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
294 	zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
295 };
296 
297 struct _zend_mm_page {
298 	char               bytes[ZEND_MM_PAGE_SIZE];
299 };
300 
301 /*
302  * A bin is one or a few continuous pages (up to 8) used for allocations of
303  * a particular "small size".
304  */
305 struct _zend_mm_bin {
306 	char               bytes[ZEND_MM_PAGE_SIZE * 8];
307 };
308 
309 struct _zend_mm_free_slot {
310 	zend_mm_free_slot *next_free_slot;
311 };
312 
313 struct _zend_mm_huge_list {
314 	void              *ptr;
315 	size_t             size;
316 	zend_mm_huge_list *next;
317 #if ZEND_DEBUG
318 	zend_mm_debug_info dbg;
319 #endif
320 };
321 
322 #define ZEND_MM_PAGE_ADDR(chunk, page_num) \
323 	((void*)(((zend_mm_page*)(chunk)) + (page_num)))
324 
325 #define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
326 static const uint32_t bin_data_size[] = {
327 	ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
328 };
329 
330 #define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
331 static const uint32_t bin_elements[] = {
332 	ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
333 };
334 
335 #define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
336 static const uint32_t bin_pages[] = {
337 	ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
338 };
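/*
 * Note: ZEND_MM_BINS_INFO is an X-macro table defined in zend_alloc_sizes.h;
 * each row supplies (num, size, elements, pages), and the three arrays above
 * are parallel projections of the same rows. For every bin,
 * bin_data_size[i] * bin_elements[i] fits into bin_pages[i] pages; e.g. the
 * first row describes bin 0: 512 elements of 8 bytes in exactly one
 * 4096-byte page.
 */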
339 
340 #if ZEND_DEBUG
341 ZEND_COLD void zend_debug_alloc_output(char *format, ...)
342 {
343 	char output_buf[256];
344 	va_list args;
345 
346 	va_start(args, format);
347 	vsnprintf(output_buf, sizeof(output_buf), format, args);
348 	va_end(args);
349 
350 #ifdef ZEND_WIN32
351 	OutputDebugString(output_buf);
352 #else
353 	fprintf(stderr, "%s", output_buf);
354 #endif
355 }
356 #endif
357 
358 static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
359 {
360 	fprintf(stderr, "%s\n", message);
361 /* See http://support.microsoft.com/kb/190351 */
362 #ifdef ZEND_WIN32
363 	fflush(stderr);
364 #endif
365 #if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
366 	kill(getpid(), SIGSEGV);
367 #endif
368 	abort();
369 }
370 
371 static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
372 	const char *format,
373 	size_t limit,
374 #if ZEND_DEBUG
375 	const char *filename,
376 	uint32_t lineno,
377 #endif
378 	size_t size)
379 {
380 
381 	heap->overflow = 1;
382 	zend_try {
383 		zend_error_noreturn(E_ERROR,
384 			format,
385 			limit,
386 #if ZEND_DEBUG
387 			filename,
388 			lineno,
389 #endif
390 			size);
391 	} zend_catch {
392 	}  zend_end_try();
393 	heap->overflow = 0;
394 	zend_bailout();
395 	exit(1);
396 }
397 
398 #ifdef _WIN32
399 static void stderr_last_error(char *msg)
400 {
401 	DWORD err = GetLastError();
402 	char *buf = php_win32_error_to_msg(err);
403 
404 	if (!buf[0]) {
405 		fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
406 	}
407 	else {
408 		fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
409 	}
410 
411 	php_win32_error_msg_free(buf);
412 }
413 #endif
414 
415 /*****************/
416 /* OS Allocation */
417 /*****************/
418 
419 static void zend_mm_munmap(void *addr, size_t size)
420 {
421 #ifdef _WIN32
422 	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
423 		/** ERROR_INVALID_ADDRESS is expected when addr is not range start address */
424 		if (GetLastError() != ERROR_INVALID_ADDRESS) {
425 #if ZEND_MM_ERROR
426 			stderr_last_error("VirtualFree() failed");
427 #endif
428 			return;
429 		}
430 		SetLastError(0);
431 
432 		MEMORY_BASIC_INFORMATION mbi;
433 		if (VirtualQuery(addr, &mbi, sizeof(mbi)) == 0) {
434 #if ZEND_MM_ERROR
435 			stderr_last_error("VirtualQuery() failed");
436 #endif
437 			return;
438 		}
439 		addr = mbi.AllocationBase;
440 
441 		if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
442 #if ZEND_MM_ERROR
443 			stderr_last_error("VirtualFree() failed");
444 #endif
445 		}
446 	}
447 #else
448 	if (munmap(addr, size) != 0) {
449 #if ZEND_MM_ERROR
450 		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
451 #endif
452 	}
453 #endif
454 }
455 
456 #ifndef HAVE_MREMAP
457 static void *zend_mm_mmap_fixed(void *addr, size_t size)
458 {
459 #ifdef _WIN32
460 	void *ptr = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
461 
462 	if (ptr == NULL) {
463 		/** ERROR_INVALID_ADDRESS is expected when fixed addr range is not free */
464 		if (GetLastError() != ERROR_INVALID_ADDRESS) {
465 #if ZEND_MM_ERROR
466 			stderr_last_error("VirtualAlloc() fixed failed");
467 #endif
468 		}
469 		SetLastError(0);
470 		return NULL;
471 	}
472 	ZEND_ASSERT(ptr == addr);
473 	return ptr;
474 #else
475 	int flags = MAP_PRIVATE | MAP_ANON;
476 #if defined(MAP_EXCL)
477 	flags |= MAP_FIXED | MAP_EXCL;
478 #elif defined(MAP_TRYFIXED)
479 	flags |= MAP_TRYFIXED;
480 #endif
481 	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
482 	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);
483 
484 	if (ptr == MAP_FAILED) {
485 #if ZEND_MM_ERROR && !defined(MAP_EXCL) && !defined(MAP_TRYFIXED)
486 		fprintf(stderr, "\nmmap() fixed failed: [%d] %s\n", errno, strerror(errno));
487 #endif
488 		return NULL;
489 	} else if (ptr != addr) {
490 		zend_mm_munmap(ptr, size);
491 		return NULL;
492 	}
493 	return ptr;
494 #endif
495 }
496 #endif
497 
498 static void *zend_mm_mmap(size_t size)
499 {
500 #ifdef _WIN32
501 	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
502 
503 	if (ptr == NULL) {
504 #if ZEND_MM_ERROR
505 		stderr_last_error("VirtualAlloc() failed");
506 #endif
507 		return NULL;
508 	}
509 	return ptr;
510 #else
511 	void *ptr;
512 
513 #if defined(MAP_HUGETLB) || defined(VM_FLAGS_SUPERPAGE_SIZE_2MB)
514 	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
515 		int fd = -1;
516 		int mflags = MAP_PRIVATE | MAP_ANON;
517 #if defined(MAP_HUGETLB)
518 		mflags |= MAP_HUGETLB;
519 #else
520 		fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
521 #endif
522 		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, mflags, fd, 0);
523 		if (ptr != MAP_FAILED) {
524 			zend_mmap_set_name(ptr, size, "zend_alloc");
525 			return ptr;
526 		}
527 	}
528 #endif
529 
530 	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);
531 
532 	if (ptr == MAP_FAILED) {
533 #if ZEND_MM_ERROR
534 		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
535 #endif
536 		return NULL;
537 	}
538 	zend_mmap_set_name(ptr, size, "zend_alloc");
539 	return ptr;
540 #endif
541 }
542 
543 /***********/
544 /* Bitmask */
545 /***********/
546 
547 /* number of trailing set (1) bits */
548 static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
549 {
550 #if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
551 	return __builtin_ctzl(~bitset);
552 #elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
553 	return __builtin_ctzll(~bitset);
554 #elif defined(_WIN32)
555 	unsigned long index;
556 
557 #if defined(_WIN64)
558 	if (!BitScanForward64(&index, ~bitset)) {
559 #else
560 	if (!BitScanForward(&index, ~bitset)) {
561 #endif
562 		/* undefined behavior */
563 		return 32;
564 	}
565 
566 	return (int)index;
567 #else
568 	int n;
569 
570 	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
571 
572 	n = 0;
573 #if SIZEOF_ZEND_LONG == 8
574 	if (sizeof(zend_mm_bitset) == 8) {
575 		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
576 	}
577 #endif
578 	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
579 	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
580 	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
581 	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
582 	return n + (bitset & 1);
583 #endif
584 }
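/*
 * Example: zend_mm_bitset_nts() returns the number of trailing set bits,
 * i.e. the index of the first clear bit. For bitset = ...0111 it returns 3:
 * in a free_map word this means pages 0-2 are allocated and bit 3 marks the
 * first free page. The portable fallback narrows the position by halving
 * (32 -> 16 -> 8 -> 4 -> 2 bits, then the last bit).
 */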
585 
586 static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
587 {
588 	return ZEND_BIT_TEST(bitset, bit);
589 }
590 
591 static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
592 {
593 	bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
594 }
595 
596 static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
597 {
598 	bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
599 }
600 
601 static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
602 {
603 	if (len == 1) {
604 		zend_mm_bitset_set_bit(bitset, start);
605 	} else {
606 		int pos = start / ZEND_MM_BITSET_LEN;
607 		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
608 		int bit = start & (ZEND_MM_BITSET_LEN - 1);
609 		zend_mm_bitset tmp;
610 
611 		if (pos != end) {
612 			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
613 			tmp = (zend_mm_bitset)-1 << bit;
614 			bitset[pos++] |= tmp;
615 			while (pos != end) {
616 				/* set all bits */
617 				bitset[pos++] = (zend_mm_bitset)-1;
618 			}
619 			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
620 			/* set bits from "0" to "end" */
621 			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
622 			bitset[pos] |= tmp;
623 		} else {
624 			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
625 			/* set bits from "bit" to "end" */
626 			tmp = (zend_mm_bitset)-1 << bit;
627 			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
628 			bitset[pos] |= tmp;
629 		}
630 	}
631 }
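/*
 * Example for the multi-word path (64-bit words): zend_mm_bitset_set_range()
 * with start = 60, len = 8 touches words 0 and 1. The first mask is
 * ~(zend_mm_bitset)0 << 60 (bits 60-63 of word 0); end = (60 + 8 - 1) & 63
 * = 3, so the last mask is ~(zend_mm_bitset)0 >> (63 - 3) (bits 0-3 of
 * word 1). zend_mm_bitset_reset_range() below clears the same masks.
 */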
632 
633 static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
634 {
635 	if (len == 1) {
636 		zend_mm_bitset_reset_bit(bitset, start);
637 	} else {
638 		int pos = start / ZEND_MM_BITSET_LEN;
639 		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
640 		int bit = start & (ZEND_MM_BITSET_LEN - 1);
641 		zend_mm_bitset tmp;
642 
643 		if (pos != end) {
644 			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
645 			tmp = ~((Z_UL(1) << bit) - 1);
646 			bitset[pos++] &= ~tmp;
647 			while (pos != end) {
648 				/* set all bits */
649 				/* reset all bits */
650 			}
651 			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
652 			/* reset bits from "0" to "end" */
653 			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
654 			bitset[pos] &= ~tmp;
655 		} else {
656 			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
657 			/* reset bits from "bit" to "end" */
658 			tmp = (zend_mm_bitset)-1 << bit;
659 			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
660 			bitset[pos] &= ~tmp;
661 		}
662 	}
663 }
664 
665 static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
666 {
667 	if (len == 1) {
668 		return !zend_mm_bitset_is_set(bitset, start);
669 	} else {
670 		int pos = start / ZEND_MM_BITSET_LEN;
671 		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
672 		int bit = start & (ZEND_MM_BITSET_LEN - 1);
673 		zend_mm_bitset tmp;
674 
675 		if (pos != end) {
676 			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
677 			tmp = (zend_mm_bitset)-1 << bit;
678 			if ((bitset[pos++] & tmp) != 0) {
679 				return 0;
680 			}
681 			while (pos != end) {
682 				/* check whole words */
683 				if (bitset[pos++] != 0) {
684 					return 0;
685 				}
686 			}
687 			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
688 			/* check bits from "0" to "end" */
689 			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
690 			return (bitset[pos] & tmp) == 0;
691 		} else {
692 			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
693 			/* check bits from "bit" to "end" */
694 			tmp = (zend_mm_bitset)-1 << bit;
695 			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
696 			return (bitset[pos] & tmp) == 0;
697 		}
698 	}
699 }
700 
701 /**********/
702 /* Chunks */
703 /**********/
704 
705 static zend_always_inline void zend_mm_hugepage(void* ptr, size_t size)
706 {
707 #if defined(MADV_HUGEPAGE)
708 	(void)madvise(ptr, size, MADV_HUGEPAGE);
709 #elif defined(HAVE_MEMCNTL)
710 	struct memcntl_mha m = {.mha_cmd = MHA_MAPSIZE_VA, .mha_pagesize = ZEND_MM_CHUNK_SIZE, .mha_flags = 0};
711 	(void)memcntl(ptr, size, MC_HAT_ADVISE, (char *)&m, 0, 0);
712 #elif !defined(VM_FLAGS_SUPERPAGE_SIZE_2MB) && !defined(MAP_ALIGNED_SUPER)
713 	zend_error_noreturn(E_WARNING, "huge_pages: thp unsupported on this platform");
714 #endif
715 }
716 
717 static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
718 {
719 	void *ptr = zend_mm_mmap(size);
720 
721 	if (ptr == NULL) {
722 		return NULL;
723 	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
724 		if (zend_mm_use_huge_pages) {
725 			zend_mm_hugepage(ptr, size);
726 		}
727 		return ptr;
728 	} else {
729 		size_t offset;
730 
731 		/* chunk has to be aligned */
732 		zend_mm_munmap(ptr, size);
733 		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
734 #ifdef _WIN32
735 		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
736 		if (offset != 0) {
737 			offset = alignment - offset;
738 		}
739 		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
740 		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + offset), size);
741 		if (ptr == NULL) { // fix GH-9650, fixed addr range is not free
742 			ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
743 			if (ptr == NULL) {
744 				return NULL;
745 			}
746 			offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
747 			if (offset != 0) {
748 				ptr = (void*)((char*)ptr + alignment - offset);
749 			}
750 		}
751 		return ptr;
752 #else
753 		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
754 		if (offset != 0) {
755 			offset = alignment - offset;
756 			zend_mm_munmap(ptr, offset);
757 			ptr = (char*)ptr + offset;
758 			alignment -= offset;
759 		}
760 		if (alignment > REAL_PAGE_SIZE) {
761 			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
762 		}
763 		if (zend_mm_use_huge_pages) {
764 			zend_mm_hugepage(ptr, size);
765 		}
766 #endif
767 		return ptr;
768 	}
769 }
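/*
 * Sketch of the alignment dance above, with size = alignment =
 * ZEND_MM_CHUNK_SIZE (2 MB) and 4 KB pages: the retry maps 2M + 2M - 4K
 * bytes, which is long enough to be guaranteed to contain a 2M-aligned
 * region of 2M bytes. On the POSIX path the unaligned head ("offset" bytes)
 * and the unused tail are munmap()ed, leaving exactly the aligned region.
 * Windows cannot trim a mapping, so the code unmaps everything and re-maps
 * the computed aligned address with zend_mm_mmap_fixed(), falling back to a
 * fresh over-allocation if that range was taken in the meantime (GH-9650).
 */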
770 
771 static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
772 {
773 #if ZEND_MM_STORAGE
774 	if (UNEXPECTED(heap->storage)) {
775 		void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
776 		ZEND_ASSERT(((uintptr_t)((char*)ptr + (alignment-1)) & ~(alignment-1)) == (uintptr_t)ptr);
777 		return ptr;
778 	}
779 #endif
780 	return zend_mm_chunk_alloc_int(size, alignment);
781 }
782 
783 static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
784 {
785 #if ZEND_MM_STORAGE
786 	if (UNEXPECTED(heap->storage)) {
787 		heap->storage->handlers.chunk_free(heap->storage, addr, size);
788 		return;
789 	}
790 #endif
791 	zend_mm_munmap(addr, size);
792 }
793 
794 static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
795 {
796 #if ZEND_MM_STORAGE
797 	if (UNEXPECTED(heap->storage)) {
798 		if (heap->storage->handlers.chunk_truncate) {
799 			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
800 		} else {
801 			return 0;
802 		}
803 	}
804 #endif
805 #ifndef _WIN32
806 	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
807 	return 1;
808 #else
809 	return 0;
810 #endif
811 }
812 
813 static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
814 {
815 #if ZEND_MM_STORAGE
816 	if (UNEXPECTED(heap->storage)) {
817 		if (heap->storage->handlers.chunk_extend) {
818 			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
819 		} else {
820 			return 0;
821 		}
822 	}
823 #endif
824 #ifdef HAVE_MREMAP
825 	/* We don't use MREMAP_MAYMOVE due to alignment requirements. */
826 	void *ptr = mremap(addr, old_size, new_size, 0);
827 	if (ptr == MAP_FAILED) {
828 		return 0;
829 	}
830 	/* Sanity check: The mapping shouldn't have moved. */
831 	ZEND_ASSERT(ptr == addr);
832 	return 1;
833 #elif !defined(_WIN32)
834 	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
835 #else
836 	return 0;
837 #endif
838 }
839 
840 static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
841 {
842 	chunk->heap = heap;
843 	chunk->next = heap->main_chunk;
844 	chunk->prev = heap->main_chunk->prev;
845 	chunk->prev->next = chunk;
846 	chunk->next->prev = chunk;
847 	/* mark first pages as allocated */
848 	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
849 	chunk->free_tail = ZEND_MM_FIRST_PAGE;
850 	/* younger chunks have bigger numbers */
851 	chunk->num = chunk->prev->num + 1;
852 	/* mark first pages as allocated */
853 	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
854 	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
855 }
856 
857 /***********************/
858 /* Huge Runs (forward) */
859 /***********************/
860 
861 static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
862 static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
863 static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
864 
865 #if ZEND_DEBUG
866 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
867 #else
868 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
869 #endif
870 
871 /**************/
872 /* Large Runs */
873 /**************/
874 
875 #if ZEND_DEBUG
876 static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
877 #else
878 static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
879 #endif
880 {
881 	zend_mm_chunk *chunk = heap->main_chunk;
882 	uint32_t page_num, len;
883 	int steps = 0;
884 
885 	while (1) {
886 		if (UNEXPECTED(chunk->free_pages < pages_count)) {
887 			goto not_found;
888 #if 0
889 		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
890 			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
891 				goto not_found;
892 			} else {
893 				page_num = chunk->free_tail;
894 				goto found;
895 			}
896 		} else if (0) {
897 			/* First-Fit Search */
898 			int free_tail = chunk->free_tail;
899 			zend_mm_bitset *bitset = chunk->free_map;
900 			zend_mm_bitset tmp = *(bitset++);
901 			int i = 0;
902 
903 			while (1) {
904 				/* skip allocated blocks */
905 				while (tmp == (zend_mm_bitset)-1) {
906 					i += ZEND_MM_BITSET_LEN;
907 					if (i == ZEND_MM_PAGES) {
908 						goto not_found;
909 					}
910 					tmp = *(bitset++);
911 				}
912 				/* find first 0 bit */
913 				page_num = i + zend_mm_bitset_nts(tmp);
914 				/* reset bits from 0 to "bit" */
915 				tmp &= tmp + 1;
916 				/* skip free blocks */
917 				while (tmp == 0) {
918 					i += ZEND_MM_BITSET_LEN;
919 					len = i - page_num;
920 					if (len >= pages_count) {
921 						goto found;
922 					} else if (i >= free_tail) {
923 						goto not_found;
924 					}
925 					tmp = *(bitset++);
926 				}
927 				/* find first 1 bit */
928 				len = (i + zend_ulong_ntz(tmp)) - page_num;
929 				if (len >= pages_count) {
930 					goto found;
931 				}
932 				/* set bits from 0 to "bit" */
933 				tmp |= tmp - 1;
934 			}
935 #endif
936 		} else {
937 			/* Best-Fit Search */
938 			int best = -1;
939 			uint32_t best_len = ZEND_MM_PAGES;
940 			uint32_t free_tail = chunk->free_tail;
941 			zend_mm_bitset *bitset = chunk->free_map;
942 			zend_mm_bitset tmp = *(bitset++);
943 			uint32_t i = 0;
944 
945 			while (1) {
946 				/* skip allocated blocks */
947 				while (tmp == (zend_mm_bitset)-1) {
948 					i += ZEND_MM_BITSET_LEN;
949 					if (i == ZEND_MM_PAGES) {
950 						if (best > 0) {
951 							page_num = best;
952 							goto found;
953 						} else {
954 							goto not_found;
955 						}
956 					}
957 					tmp = *(bitset++);
958 				}
959 				/* find first 0 bit */
960 				page_num = i + zend_mm_bitset_nts(tmp);
961 				/* reset bits from 0 to "bit" */
962 				tmp &= tmp + 1;
963 				/* skip free blocks */
964 				while (tmp == 0) {
965 					i += ZEND_MM_BITSET_LEN;
966 					if (i >= free_tail || i == ZEND_MM_PAGES) {
967 						len = ZEND_MM_PAGES - page_num;
968 						if (len >= pages_count && len < best_len) {
969 							chunk->free_tail = page_num + pages_count;
970 							goto found;
971 						} else {
972 							/* set accurate value */
973 							chunk->free_tail = page_num;
974 							if (best > 0) {
975 								page_num = best;
976 								goto found;
977 							} else {
978 								goto not_found;
979 							}
980 						}
981 					}
982 					tmp = *(bitset++);
983 				}
984 				/* find first 1 bit */
985 				len = i + zend_ulong_ntz(tmp) - page_num;
986 				if (len >= pages_count) {
987 					if (len == pages_count) {
988 						goto found;
989 					} else if (len < best_len) {
990 						best_len = len;
991 						best = page_num;
992 					}
993 				}
994 				/* set bits from 0 to "bit" */
995 				tmp |= tmp - 1;
996 			}
997 		}
998 
999 not_found:
1000 		if (chunk->next == heap->main_chunk) {
1001 get_chunk:
1002 			if (heap->cached_chunks) {
1003 				heap->cached_chunks_count--;
1004 				chunk = heap->cached_chunks;
1005 				heap->cached_chunks = chunk->next;
1006 			} else {
1007 #if ZEND_MM_LIMIT
1008 				if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
1009 					if (zend_mm_gc(heap)) {
1010 						goto get_chunk;
1011 					} else if (heap->overflow == 0) {
1012 #if ZEND_DEBUG
1013 						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1014 #else
1015 						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
1016 #endif
1017 						return NULL;
1018 					}
1019 				}
1020 #endif
1021 				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1022 				if (UNEXPECTED(chunk == NULL)) {
1023 					/* insufficient memory */
1024 					if (zend_mm_gc(heap) &&
1025 					    (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1026 						/* pass */
1027 					} else {
1028 #if !ZEND_MM_LIMIT
1029 						zend_mm_safe_error(heap, "Out of memory");
1030 #elif ZEND_DEBUG
1031 						zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1032 #else
1033 						zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1034 #endif
1035 						return NULL;
1036 					}
1037 				}
1038 #if ZEND_MM_STAT
1039 				do {
1040 					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1041 					size_t peak = MAX(heap->real_peak, size);
1042 					heap->real_size = size;
1043 					heap->real_peak = peak;
1044 				} while (0);
1045 #elif ZEND_MM_LIMIT
1046 				heap->real_size += ZEND_MM_CHUNK_SIZE;
1047 
1048 #endif
1049 			}
1050 			heap->chunks_count++;
1051 			if (heap->chunks_count > heap->peak_chunks_count) {
1052 				heap->peak_chunks_count = heap->chunks_count;
1053 			}
1054 			zend_mm_chunk_init(heap, chunk);
1055 			page_num = ZEND_MM_FIRST_PAGE;
1056 			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1057 			goto found;
1058 		} else {
1059 			chunk = chunk->next;
1060 			steps++;
1061 		}
1062 	}
1063 
1064 found:
1065 	if (steps > 2 && pages_count < 8) {
1066 		/* move chunk into the head of the linked-list */
1067 		chunk->prev->next = chunk->next;
1068 		chunk->next->prev = chunk->prev;
1069 		chunk->next = heap->main_chunk->next;
1070 		chunk->prev = heap->main_chunk;
1071 		chunk->prev->next = chunk;
1072 		chunk->next->prev = chunk;
1073 	}
1074 	/* mark run as allocated */
1075 	chunk->free_pages -= pages_count;
1076 	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1077 	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1078 	if (page_num == chunk->free_tail) {
1079 		chunk->free_tail = page_num + pages_count;
1080 	}
1081 	return ZEND_MM_PAGE_ADDR(chunk, page_num);
1082 }
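/*
 * Walk-through of the best-fit loop above: for a 3-page request in a chunk
 * whose free_map contains free runs of 2, 5 and 3 pages, the scan skips the
 * 2-page hole, remembers the 5-page hole as "best", and stops at the 3-page
 * hole because an exact fit (len == pages_count) is taken immediately. Only
 * when no exact fit exists is the smallest sufficiently large hole used, and
 * only when no hole fits does the allocator take a cached chunk or mmap() a
 * new one (subject to the memory limit).
 */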
1083 
1084 static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1085 {
1086 	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1087 #if ZEND_DEBUG
1088 	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1089 #else
1090 	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1091 #endif
1092 #if ZEND_MM_STAT
1093 	do {
1094 		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1095 		size_t peak = MAX(heap->peak, size);
1096 		heap->size = size;
1097 		heap->peak = peak;
1098 	} while (0);
1099 #endif
1100 	return ptr;
1101 }
1102 
1103 #if ZEND_DEBUG
1104 static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1105 {
1106 	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1107 }
1108 #else
1109 static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1110 {
1111 	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1112 }
1113 #endif
1114 
1115 static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
1116 {
1117 	chunk->next->prev = chunk->prev;
1118 	chunk->prev->next = chunk->next;
1119 	heap->chunks_count--;
1120 	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
1121 	 || (heap->chunks_count == heap->last_chunks_delete_boundary
1122 	  && heap->last_chunks_delete_count >= 4)) {
1123 		/* delay deletion */
1124 		heap->cached_chunks_count++;
1125 		chunk->next = heap->cached_chunks;
1126 		heap->cached_chunks = chunk;
1127 	} else {
1128 #if ZEND_MM_STAT || ZEND_MM_LIMIT
1129 		heap->real_size -= ZEND_MM_CHUNK_SIZE;
1130 #endif
1131 		if (!heap->cached_chunks) {
1132 			if (heap->chunks_count != heap->last_chunks_delete_boundary) {
1133 				heap->last_chunks_delete_boundary = heap->chunks_count;
1134 				heap->last_chunks_delete_count = 0;
1135 			} else {
1136 				heap->last_chunks_delete_count++;
1137 			}
1138 		}
1139 		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1140 			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1141 		} else {
1142 //TODO: select the best chunk to delete???
1143 			chunk->next = heap->cached_chunks->next;
1144 			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1145 			heap->cached_chunks = chunk;
1146 		}
1147 	}
1148 }
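/*
 * Caching heuristic above, by example: if requests typically keep ~4 chunks
 * alive (avg_chunks_count ~= 4.0) and a 5th chunk becomes empty, it is kept
 * on cached_chunks instead of being unmapped, on the assumption it will be
 * needed again soon. A chunk is really returned to the OS only when the
 * total exceeds the recent average; the last_chunks_delete_* fields damp
 * repeated map/unmap cycles at the same boundary.
 */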
1149 
1150 static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
1151 {
1152 	chunk->free_pages += pages_count;
1153 	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1154 	chunk->map[page_num] = 0;
1155 	if (chunk->free_tail == page_num + pages_count) {
1156 		/* this setting may not be accurate */
1157 		chunk->free_tail = page_num;
1158 	}
1159 	if (free_chunk && chunk != heap->main_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1160 		zend_mm_delete_chunk(heap, chunk);
1161 	}
1162 }
1163 
1164 static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1165 {
1166 	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1167 }
1168 
1169 static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1170 {
1171 #if ZEND_MM_STAT
1172 	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1173 #endif
1174 	zend_mm_free_pages(heap, chunk, page_num, pages_count);
1175 }
1176 
1177 /**************/
1178 /* Small Runs */
1179 /**************/
1180 
1181 /* position of the highest set bit (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8, etc.) */
1182 static zend_always_inline int zend_mm_small_size_to_bit(int size)
1183 {
1184 #if (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
1185 	return (__builtin_clz(size) ^ 0x1f) + 1;
1186 #elif defined(_WIN32)
1187 	unsigned long index;
1188 
1189 	if (!BitScanReverse(&index, (unsigned long)size)) {
1190 		/* undefined behavior */
1191 		return 64;
1192 	}
1193 
1194 	return (((31 - (int)index) ^ 0x1f) + 1);
1195 #else
1196 	int n = 16;
1197 	if (size <= 0x00ff) {n -= 8; size = size << 8;}
1198 	if (size <= 0x0fff) {n -= 4; size = size << 4;}
1199 	if (size <= 0x3fff) {n -= 2; size = size << 2;}
1200 	if (size <= 0x7fff) {n -= 1;}
1201 	return n;
1202 #endif
1203 }
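/*
 * Examples: zend_mm_small_size_to_bit(1) == 1, (4) == 3, (128) == 8 -- the
 * 1-based position of the highest set bit. On GCC/Clang this is
 * (__builtin_clz(size) ^ 0x1f) + 1, i.e. 32 - clz(size).
 */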
1204 
1205 #ifndef MAX
1206 # define MAX(a, b) (((a) > (b)) ? (a) : (b))
1207 #endif
1208 
1209 #ifndef MIN
1210 # define MIN(a, b) (((a) < (b)) ? (a) : (b))
1211 #endif
1212 
1213 static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1214 {
1215 #if 0
1216 	int n;
1217                             /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1218 	static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1219 	static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1220 
1221 	if (UNEXPECTED(size <= 2)) return 0;
1222 	n = zend_mm_small_size_to_bit(size - 1);
1223 	return ((size-1) >> f1[n]) + f2[n];
1224 #else
1225 	unsigned int t1, t2;
1226 
1227 	if (size <= 64) {
1228 		/* we need to support size == 0 ... */
1229 		return (size - !!size) >> 3;
1230 	} else {
1231 		t1 = size - 1;
1232 		t2 = zend_mm_small_size_to_bit(t1) - 3;
1233 		t1 = t1 >> t2;
1234 		t2 = t2 - 3;
1235 		t2 = t2 << 2;
1236 		return (int)(t1 + t2);
1237 	}
1238 #endif
1239 }
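/*
 * Worked example for the branchy path above, size = 100:
 *   t1 = 99; zend_mm_small_size_to_bit(99) == 7; t2 = 7 - 3 = 4;
 *   t1 = 99 >> 4 = 6; t2 = (4 - 3) << 2 = 4; bin = 6 + 4 = 10.
 * Bin 10 holds 112-byte elements (requests of 97-112 bytes), so a 100-byte
 * request is rounded up to 112 bytes. For size <= 64 the bins form a plain
 * 8-byte ladder: (size - !!size) >> 3, e.g. size 24 -> bin 2 (24 bytes).
 */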
1240 
1241 #define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1242 
1243 static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1244 {
1245 	zend_mm_chunk *chunk;
1246 	int page_num;
1247 	zend_mm_bin *bin;
1248 	zend_mm_free_slot *p, *end;
1249 
1250 #if ZEND_DEBUG
1251 	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1252 #else
1253 	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1254 #endif
1255 	if (UNEXPECTED(bin == NULL)) {
1256 		/* insufficient memory */
1257 		return NULL;
1258 	}
1259 
1260 	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1261 	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1262 	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1263 	if (bin_pages[bin_num] > 1) {
1264 		uint32_t i = 1;
1265 
1266 		do {
1267 			chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1268 			i++;
1269 		} while (i < bin_pages[bin_num]);
1270 	}
1271 
1272 	/* create a linked list of elements from 1 to last */
1273 	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1274 	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1275 	do {
1276 		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1277 #if ZEND_DEBUG
1278 		do {
1279 			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1280 			dbg->size = 0;
1281 		} while (0);
1282 #endif
1283 		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1284 	} while (p != end);
1285 
1286 	/* terminate list using NULL */
1287 	p->next_free_slot = NULL;
1288 #if ZEND_DEBUG
1289 		do {
1290 			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1291 			dbg->size = 0;
1292 		} while (0);
1293 #endif
1294 
1295 	/* return first element */
1296 	return bin;
1297 }
1298 
1299 static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1300 {
1301 #if ZEND_MM_STAT
1302 	do {
1303 		size_t size = heap->size + bin_data_size[bin_num];
1304 		size_t peak = MAX(heap->peak, size);
1305 		heap->size = size;
1306 		heap->peak = peak;
1307 	} while (0);
1308 #endif
1309 
1310 	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1311 		zend_mm_free_slot *p = heap->free_slot[bin_num];
1312 		heap->free_slot[bin_num] = p->next_free_slot;
1313 		return p;
1314 	} else {
1315 		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1316 	}
1317 }
1318 
1319 static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1320 {
1321 	zend_mm_free_slot *p;
1322 
1323 #if ZEND_MM_STAT
1324 	heap->size -= bin_data_size[bin_num];
1325 #endif
1326 
1327 #if ZEND_DEBUG
1328 	do {
1329 		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1330 		dbg->size = 0;
1331 	} while (0);
1332 #endif
1333 
1334 	p = (zend_mm_free_slot*)ptr;
1335 	p->next_free_slot = heap->free_slot[bin_num];
1336 	heap->free_slot[bin_num] = p;
1337 }
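/*
 * Note: free_slot[bin_num] is an intrusive LIFO -- the freed memory itself
 * stores the next pointer, so both operations are O(1) with no extra space:
 *
 *   free:  p->next_free_slot = heap->free_slot[bin]; heap->free_slot[bin] = p;
 *   alloc: p = heap->free_slot[bin]; heap->free_slot[bin] = p->next_free_slot;
 *
 * No per-run bookkeeping is updated on this hot path; completely free runs
 * are only reclaimed later, e.g. by zend_mm_gc().
 */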
1338 
1339 /********/
1340 /* Heap */
1341 /********/
1342 
1343 #if ZEND_DEBUG
1344 static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1345 {
1346 	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1347 	zend_mm_chunk *chunk;
1348 	int page_num;
1349 	zend_mm_page_info info;
1350 
1351 	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1352 	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1353 	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1354 	info = chunk->map[page_num];
1355 	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1356 	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1357 		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1358 		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1359 	} else /* if (info & ZEND_MM_IS_LRUN) */ {
1360 		int pages_count = ZEND_MM_LRUN_PAGES(info);
1361 
1362 		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1363 	}
1364 }
1365 #endif
1366 
1367 static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1368 {
1369 	void *ptr;
1370 #if ZEND_DEBUG
1371 	size_t real_size = size;
1372 	zend_mm_debug_info *dbg;
1373 
1374 	/* special handling for zero-size allocation */
1375 	size = MAX(size, 1);
1376 	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1377 	if (UNEXPECTED(size < real_size)) {
1378 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1379 		return NULL;
1380 	}
1381 #endif
1382 	if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
1383 		ptr = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1384 #if ZEND_DEBUG
1385 		dbg = zend_mm_get_debug_info(heap, ptr);
1386 		dbg->size = real_size;
1387 		dbg->filename = __zend_filename;
1388 		dbg->orig_filename = __zend_orig_filename;
1389 		dbg->lineno = __zend_lineno;
1390 		dbg->orig_lineno = __zend_orig_lineno;
1391 #endif
1392 		return ptr;
1393 	} else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
1394 		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1395 #if ZEND_DEBUG
1396 		dbg = zend_mm_get_debug_info(heap, ptr);
1397 		dbg->size = real_size;
1398 		dbg->filename = __zend_filename;
1399 		dbg->orig_filename = __zend_orig_filename;
1400 		dbg->lineno = __zend_lineno;
1401 		dbg->orig_lineno = __zend_orig_lineno;
1402 #endif
1403 		return ptr;
1404 	} else {
1405 #if ZEND_DEBUG
1406 		size = real_size;
1407 #endif
1408 		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1409 	}
1410 }
1411 
1412 static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1413 {
1414 	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1415 
1416 	if (UNEXPECTED(page_offset == 0)) {
1417 		if (ptr != NULL) {
1418 			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1419 		}
1420 	} else {
1421 		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1422 		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1423 		zend_mm_page_info info = chunk->map[page_num];
1424 
1425 		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1426 		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1427 			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1428 		} else /* if (info & ZEND_MM_IS_LRUN) */ {
1429 			int pages_count = ZEND_MM_LRUN_PAGES(info);
1430 
1431 			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1432 			zend_mm_free_large(heap, chunk, page_num, pages_count);
1433 		}
1434 	}
1435 }
1436 
1437 static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1438 {
1439 	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1440 
1441 	if (UNEXPECTED(page_offset == 0)) {
1442 		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1443 	} else {
1444 		zend_mm_chunk *chunk;
1445 #if 0 && ZEND_DEBUG
1446 		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1447 		return dbg->size;
1448 #else
1449 		int page_num;
1450 		zend_mm_page_info info;
1451 
1452 		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1453 		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1454 		info = chunk->map[page_num];
1455 		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1456 		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1457 			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
1458 		} else /* if (info & ZEND_MM_IS_LRUN) */ {
1459 			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1460 		}
1461 #endif
1462 	}
1463 }
1464 
1465 static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1466 {
1467 	void *ret;
1468 
1469 #if ZEND_MM_STAT
1470 	do {
1471 		size_t orig_peak = heap->peak;
1472 #endif
1473 		ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1474 		memcpy(ret, ptr, copy_size);
1475 		zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1476 #if ZEND_MM_STAT
1477 		heap->peak = MAX(orig_peak, heap->size);
1478 	} while (0);
1479 #endif
1480 	return ret;
1481 }
1482 
1483 static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1484 {
1485 	size_t old_size;
1486 	size_t new_size;
1487 #if ZEND_DEBUG
1488 	size_t real_size;
1489 #endif
1490 
1491 	old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1492 #if ZEND_DEBUG
1493 	real_size = size;
1494 	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1495 #endif
1496 	if (size > ZEND_MM_MAX_LARGE_SIZE) {
1497 #if ZEND_DEBUG
1498 		size = real_size;
1499 #endif
1500 #ifdef ZEND_WIN32
1501 		/* On Windows we don't have the ability to extend huge blocks in-place.
1502 		 * We allocate them with 2MB size granularity to avoid many
1503 		 * reallocations when they are extended by small pieces.
1504 		 */
1505 		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
1506 #else
1507 		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1508 #endif
1509 		if (new_size == old_size) {
1510 #if ZEND_DEBUG
1511 			zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1512 #else
1513 			zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1514 #endif
1515 			return ptr;
1516 		} else if (new_size < old_size) {
1517 			/* unmap tail */
1518 			if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1519 #if ZEND_MM_STAT || ZEND_MM_LIMIT
1520 				heap->real_size -= old_size - new_size;
1521 #endif
1522 #if ZEND_MM_STAT
1523 				heap->size -= old_size - new_size;
1524 #endif
1525 #if ZEND_DEBUG
1526 				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1527 #else
1528 				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1529 #endif
1530 				return ptr;
1531 			}
1532 		} else /* if (new_size > old_size) */ {
1533 #if ZEND_MM_LIMIT
1534 			if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
1535 				if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
1536 					/* pass */
1537 				} else if (heap->overflow == 0) {
1538 #if ZEND_DEBUG
1539 					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1540 #else
1541 					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1542 #endif
1543 					return NULL;
1544 				}
1545 			}
1546 #endif
1547 			/* try to map tail right after this block */
1548 			if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1549 #if ZEND_MM_STAT || ZEND_MM_LIMIT
1550 				heap->real_size += new_size - old_size;
1551 #endif
1552 #if ZEND_MM_STAT
1553 				heap->real_peak = MAX(heap->real_peak, heap->real_size);
1554 				heap->size += new_size - old_size;
1555 				heap->peak = MAX(heap->peak, heap->size);
1556 #endif
1557 #if ZEND_DEBUG
1558 				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1559 #else
1560 				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1561 #endif
1562 				return ptr;
1563 			}
1564 		}
1565 	}
1566 
1567 	return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1568 }
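/*
 * Summary of the huge-realloc strategy above: round the new size to the
 * mapping granularity; if it matches the old size, only the recorded size
 * changes; shrinking unmaps the tail; growing first checks the memory limit,
 * then tries to extend the mapping in place (mremap() where available,
 * otherwise a fixed mmap() right after the block). Only if all of that fails
 * does zend_mm_realloc_slow() allocate, copy and free.
 */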
1569 
1570 static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1571 {
1572 	size_t page_offset;
1573 	size_t old_size;
1574 	size_t new_size;
1575 	void *ret;
1576 #if ZEND_DEBUG
1577 	zend_mm_debug_info *dbg;
1578 #endif
1579 
1580 	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1581 	if (UNEXPECTED(page_offset == 0)) {
1582 		if (EXPECTED(ptr == NULL)) {
1583 			return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1584 		} else {
1585 			return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1586 		}
1587 	} else {
1588 		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1589 		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1590 		zend_mm_page_info info = chunk->map[page_num];
1591 #if ZEND_DEBUG
1592 		size_t real_size = size;
1593 
1594 		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1595 #endif
1596 
1597 		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1598 		if (info & ZEND_MM_IS_SRUN) {
1599 			int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1600 
1601 			do {
1602 				old_size = bin_data_size[old_bin_num];
1603 
1604 				/* Check if requested size fits into current bin */
1605 				if (size <= old_size) {
1606 					/* Check if truncation is necessary */
1607 					if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
1608 						/* truncation */
1609 						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1610 						copy_size = use_copy_size ? MIN(size, copy_size) : size;
1611 						memcpy(ret, ptr, copy_size);
1612 						zend_mm_free_small(heap, ptr, old_bin_num);
1613 					} else {
1614 						/* reallocation in-place */
1615 						ret = ptr;
1616 					}
1617 				} else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1618 					/* small extension */
1619 
1620 #if ZEND_MM_STAT
1621 					do {
1622 						size_t orig_peak = heap->peak;
1623 #endif
1624 						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1625 						copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
1626 						memcpy(ret, ptr, copy_size);
1627 						zend_mm_free_small(heap, ptr, old_bin_num);
1628 #if ZEND_MM_STAT
1629 						heap->peak = MAX(orig_peak, heap->size);
1630 					} while (0);
1631 #endif
1632 				} else {
1633 					/* slow reallocation */
1634 					break;
1635 				}
1636 
1637 #if ZEND_DEBUG
1638 				dbg = zend_mm_get_debug_info(heap, ret);
1639 				dbg->size = real_size;
1640 				dbg->filename = __zend_filename;
1641 				dbg->orig_filename = __zend_orig_filename;
1642 				dbg->lineno = __zend_lineno;
1643 				dbg->orig_lineno = __zend_orig_lineno;
1644 #endif
1645 				return ret;
1646 			} while (0);
1647 
1648 		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1649 			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1650 			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1651 			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1652 				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1653 				if (new_size == old_size) {
1654 #if ZEND_DEBUG
1655 					dbg = zend_mm_get_debug_info(heap, ptr);
1656 					dbg->size = real_size;
1657 					dbg->filename = __zend_filename;
1658 					dbg->orig_filename = __zend_orig_filename;
1659 					dbg->lineno = __zend_lineno;
1660 					dbg->orig_lineno = __zend_orig_lineno;
1661 #endif
1662 					return ptr;
1663 				} else if (new_size < old_size) {
1664 					/* free tail pages */
1665 					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1666 					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1667 
1668 #if ZEND_MM_STAT
1669 					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1670 #endif
1671 					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1672 					chunk->free_pages += rest_pages_count;
1673 					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1674 #if ZEND_DEBUG
1675 					dbg = zend_mm_get_debug_info(heap, ptr);
1676 					dbg->size = real_size;
1677 					dbg->filename = __zend_filename;
1678 					dbg->orig_filename = __zend_orig_filename;
1679 					dbg->lineno = __zend_lineno;
1680 					dbg->orig_lineno = __zend_orig_lineno;
1681 #endif
1682 					return ptr;
1683 				} else /* if (new_size > old_size) */ {
1684 					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1685 					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1686 
1687 					/* try to allocate tail pages after this block */
1688 					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1689 					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1690 #if ZEND_MM_STAT
1691 						do {
1692 							size_t size = heap->size + (new_size - old_size);
1693 							size_t peak = MAX(heap->peak, size);
1694 							heap->size = size;
1695 							heap->peak = peak;
1696 						} while (0);
1697 #endif
1698 						chunk->free_pages -= new_pages_count - old_pages_count;
1699 						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1700 						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1701 #if ZEND_DEBUG
1702 						dbg = zend_mm_get_debug_info(heap, ptr);
1703 						dbg->size = real_size;
1704 						dbg->filename = __zend_filename;
1705 						dbg->orig_filename = __zend_orig_filename;
1706 						dbg->lineno = __zend_lineno;
1707 						dbg->orig_lineno = __zend_orig_lineno;
1708 #endif
1709 						return ptr;
1710 					}
1711 				}
1712 			}
1713 		}
1714 #if ZEND_DEBUG
1715 		size = real_size;
1716 #endif
1717 	}
1718 
1719 	copy_size = MIN(old_size, copy_size);
1720 	return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1721 }
1722 
1723 /*********************/
1724 /* Huge Runs (again) */
1725 /*********************/
1726 
1727 #if ZEND_DEBUG
1728 static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1729 #else
1730 static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1731 #endif
1732 {
1733 	zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1734 	list->ptr = ptr;
1735 	list->size = size;
1736 	list->next = heap->huge_list;
1737 #if ZEND_DEBUG
1738 	list->dbg.size = dbg_size;
1739 	list->dbg.filename = __zend_filename;
1740 	list->dbg.orig_filename = __zend_orig_filename;
1741 	list->dbg.lineno = __zend_lineno;
1742 	list->dbg.orig_lineno = __zend_orig_lineno;
1743 #endif
1744 	heap->huge_list = list;
1745 }
1746 
1747 static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1748 {
1749 	zend_mm_huge_list *prev = NULL;
1750 	zend_mm_huge_list *list = heap->huge_list;
1751 	while (list != NULL) {
1752 		if (list->ptr == ptr) {
1753 			size_t size;
1754 
1755 			if (prev) {
1756 				prev->next = list->next;
1757 			} else {
1758 				heap->huge_list = list->next;
1759 			}
1760 			size = list->size;
1761 			zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1762 			return size;
1763 		}
1764 		prev = list;
1765 		list = list->next;
1766 	}
1767 	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1768 	return 0;
1769 }
1770 
1771 static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1772 {
1773 	zend_mm_huge_list *list = heap->huge_list;
1774 	while (list != NULL) {
1775 		if (list->ptr == ptr) {
1776 			return list->size;
1777 		}
1778 		list = list->next;
1779 	}
1780 	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1781 	return 0;
1782 }
1783 
1784 #if ZEND_DEBUG
1785 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1786 #else
1787 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1788 #endif
1789 {
1790 	zend_mm_huge_list *list = heap->huge_list;
1791 	while (list != NULL) {
1792 		if (list->ptr == ptr) {
1793 			list->size = size;
1794 #if ZEND_DEBUG
1795 			list->dbg.size = dbg_size;
1796 			list->dbg.filename = __zend_filename;
1797 			list->dbg.orig_filename = __zend_orig_filename;
1798 			list->dbg.lineno = __zend_lineno;
1799 			list->dbg.orig_lineno = __zend_orig_lineno;
1800 #endif
1801 			return;
1802 		}
1803 		list = list->next;
1804 	}
1805 }
1806 
1807 static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1808 {
1809 #ifdef ZEND_WIN32
1810 	/* On Windows we don't have the ability to extend huge blocks in-place,
1811 	 * so we allocate them with 2MB size granularity to avoid frequent
1812 	 * reallocations when they are extended by small pieces.
1813 	 */
1814 	size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
1815 #else
1816 	size_t alignment = REAL_PAGE_SIZE;
1817 #endif
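	/* Example: with typical 4 KB pages and the default 2 MB chunk size, a
	 * request for 2,100,000 bytes rounds up to 4,194,304 bytes (two chunks)
	 * on Windows, but only to 2,101,248 bytes (513 pages) elsewhere. */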
1818 	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
1819 	void *ptr;
1820 
1821 	if (UNEXPECTED(new_size < size)) {
1822 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
1823 	}
1824 
1825 #if ZEND_MM_LIMIT
1826 	if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
1827 		if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
1828 			/* pass */
1829 		} else if (heap->overflow == 0) {
1830 #if ZEND_DEBUG
1831 			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1832 #else
1833 			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1834 #endif
1835 			return NULL;
1836 		}
1837 	}
1838 #endif
1839 	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1840 	if (UNEXPECTED(ptr == NULL)) {
1841 		/* insufficient memory */
1842 		if (zend_mm_gc(heap) &&
1843 		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
1844 			/* pass */
1845 		} else {
1846 #if !ZEND_MM_LIMIT
1847 			zend_mm_safe_error(heap, "Out of memory");
1848 #elif ZEND_DEBUG
1849 			zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1850 #else
1851 			zend_mm_safe_error(heap, "Out of memory (allocated %zu bytes) (tried to allocate %zu bytes)", heap->real_size, size);
1852 #endif
1853 			return NULL;
1854 		}
1855 	}
1856 #if ZEND_DEBUG
1857 	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1858 #else
1859 	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1860 #endif
1861 #if ZEND_MM_STAT
1862 	do {
1863 		size_t size = heap->real_size + new_size;
1864 		size_t peak = MAX(heap->real_peak, size);
1865 		heap->real_size = size;
1866 		heap->real_peak = peak;
1867 	} while (0);
1868 	do {
1869 		size_t size = heap->size + new_size;
1870 		size_t peak = MAX(heap->peak, size);
1871 		heap->size = size;
1872 		heap->peak = peak;
1873 	} while (0);
1874 #elif ZEND_MM_LIMIT
1875 	heap->real_size += new_size;
1876 #endif
1877 	return ptr;
1878 }
1879 
1880 static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1881 {
1882 	size_t size;
1883 
1884 	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1885 	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1886 	zend_mm_chunk_free(heap, ptr, size);
1887 #if ZEND_MM_STAT || ZEND_MM_LIMIT
1888 	heap->real_size -= size;
1889 #endif
1890 #if ZEND_MM_STAT
1891 	heap->size -= size;
1892 #endif
1893 }
1894 
1895 /******************/
1896 /* Initialization */
1897 /******************/
1898 
1899 static zend_mm_heap *zend_mm_init(void)
1900 {
1901 	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1902 	zend_mm_heap *heap;
1903 
1904 	if (UNEXPECTED(chunk == NULL)) {
1905 #if ZEND_MM_ERROR
1906 		fprintf(stderr, "Can't initialize heap\n");
1907 #endif
1908 		return NULL;
1909 	}
1910 	heap = &chunk->heap_slot;
1911 	chunk->heap = heap;
1912 	chunk->next = chunk;
1913 	chunk->prev = chunk;
1914 	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1915 	chunk->free_tail = ZEND_MM_FIRST_PAGE;
1916 	chunk->num = 0;
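	/* mark the first ZEND_MM_FIRST_PAGE page(s), which hold the chunk
	 * header, as allocated: (1 << n) - 1 sets the n lowest bits */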
1917 	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1918 	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1919 	heap->main_chunk = chunk;
1920 	heap->cached_chunks = NULL;
1921 	heap->chunks_count = 1;
1922 	heap->peak_chunks_count = 1;
1923 	heap->cached_chunks_count = 0;
1924 	heap->avg_chunks_count = 1.0;
1925 	heap->last_chunks_delete_boundary = 0;
1926 	heap->last_chunks_delete_count = 0;
1927 #if ZEND_MM_STAT || ZEND_MM_LIMIT
1928 	heap->real_size = ZEND_MM_CHUNK_SIZE;
1929 #endif
1930 #if ZEND_MM_STAT
1931 	heap->real_peak = ZEND_MM_CHUNK_SIZE;
1932 	heap->size = 0;
1933 	heap->peak = 0;
1934 #endif
1935 #if ZEND_MM_LIMIT
1936 	heap->limit = (size_t)Z_L(-1) >> 1;
1937 	heap->overflow = 0;
1938 #endif
1939 #if ZEND_MM_CUSTOM
1940 	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
1941 #endif
1942 #if ZEND_MM_STORAGE
1943 	heap->storage = NULL;
1944 #endif
1945 	heap->huge_list = NULL;
1946 	return heap;
1947 }
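
/*
 * Garbage collection for small runs. The first pass over each bin's free
 * list counts the free elements of every run in its page-map entry; the
 * second pass unlinks free-list entries that belong to completely free
 * runs. The final walk over the chunk ring releases the pages of such runs
 * (resetting the temporary counters everywhere else) and deletes chunks
 * that became entirely empty. Returns the number of bytes collected.
 */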
1948 
1949 ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
1950 {
1951 	zend_mm_free_slot *p, **q;
1952 	zend_mm_chunk *chunk;
1953 	size_t page_offset;
1954 	int page_num;
1955 	zend_mm_page_info info;
1956 	uint32_t i, free_counter;
1957 	bool has_free_pages;
1958 	size_t collected = 0;
1959 
1960 #if ZEND_MM_CUSTOM
1961 	if (heap->use_custom_heap) {
1962 		return 0;
1963 	}
1964 #endif
1965 
1966 	for (i = 0; i < ZEND_MM_BINS; i++) {
1967 		has_free_pages = false;
1968 		p = heap->free_slot[i];
1969 		while (p != NULL) {
1970 			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1971 			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1972 			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1973 			ZEND_ASSERT(page_offset != 0);
1974 			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1975 			info = chunk->map[page_num];
1976 			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1977 			if (info & ZEND_MM_IS_LRUN) {
1978 				page_num -= ZEND_MM_NRUN_OFFSET(info);
1979 				info = chunk->map[page_num];
1980 				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1981 				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1982 			}
1983 			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1984 			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
1985 			if (free_counter == bin_elements[i]) {
1986 				has_free_pages = true;
1987 			}
1988 			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
1989 			p = p->next_free_slot;
1990 		}
1991 
1992 		if (!has_free_pages) {
1993 			continue;
1994 		}
1995 
1996 		q = &heap->free_slot[i];
1997 		p = *q;
1998 		while (p != NULL) {
1999 			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2000 			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2001 			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
2002 			ZEND_ASSERT(page_offset != 0);
2003 			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
2004 			info = chunk->map[page_num];
2005 			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2006 			if (info & ZEND_MM_IS_LRUN) {
2007 				page_num -= ZEND_MM_NRUN_OFFSET(info);
2008 				info = chunk->map[page_num];
2009 				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
2010 				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
2011 			}
2012 			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
2013 			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
2014 				/* remove from cache */
2015 				p = p->next_free_slot;
2016 				*q = p;
2017 			} else {
2018 				q = &p->next_free_slot;
2019 				p = *q;
2020 			}
2021 		}
2022 	}
2023 
2024 	chunk = heap->main_chunk;
2025 	do {
2026 		i = ZEND_MM_FIRST_PAGE;
2027 		while (i < chunk->free_tail) {
2028 			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
2029 				info = chunk->map[i];
2030 				if (info & ZEND_MM_IS_SRUN) {
2031 					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
2032 					int pages_count = bin_pages[bin_num];
2033 
2034 					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
2035 						/* all elements are free */
2036 						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
2037 						collected += pages_count;
2038 					} else {
2039 						/* reset counter */
2040 						chunk->map[i] = ZEND_MM_SRUN(bin_num);
2041 					}
2042 					i += bin_pages[bin_num];
2043 				} else /* if (info & ZEND_MM_IS_LRUN) */ {
2044 					i += ZEND_MM_LRUN_PAGES(info);
2045 				}
2046 			} else {
2047 				i++;
2048 			}
2049 		}
2050 		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
2051 			zend_mm_chunk *next_chunk = chunk->next;
2052 
2053 			zend_mm_delete_chunk(heap, chunk);
2054 			chunk = next_chunk;
2055 		} else {
2056 			chunk = chunk->next;
2057 		}
2058 	} while (chunk != heap->main_chunk);
2059 
2060 	return collected * ZEND_MM_PAGE_SIZE;
2061 }
2062 
2063 #if ZEND_DEBUG
2064 /******************/
2065 /* Leak detection */
2066 /******************/
2067 
2068 static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
2069 {
2070 	bool empty = true;
2071 	zend_long count = 0;
2072 	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2073 	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2074 
2075 	while (j < bin_elements[bin_num]) {
2076 		if (dbg->size != 0) {
2077 			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2078 				count++;
2079 				dbg->size = 0;
2080 				dbg->filename = NULL;
2081 				dbg->lineno = 0;
2082 			} else {
2083 				empty = false;
2084 			}
2085 		}
2086 		j++;
2087 		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2088 	}
2089 	if (empty) {
2090 		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
2091 	}
2092 	return count;
2093 }
2094 
2095 static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
2096 {
2097 	zend_long count = 0;
2098 
2099 	do {
2100 		while (i < p->free_tail) {
2101 			if (zend_mm_bitset_is_set(p->free_map, i)) {
2102 				if (p->map[i] & ZEND_MM_IS_SRUN) {
2103 					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2104 					count += zend_mm_find_leaks_small(p, i, 0, leak);
2105 					i += bin_pages[bin_num];
2106 				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2107 					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2108 					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2109 
2110 					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2111 						count++;
2112 					}
2113 					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2114 					i += pages_count;
2115 				}
2116 			} else {
2117 				i++;
2118 			}
2119 		}
2120 		p = p->next;
2121 		i = ZEND_MM_FIRST_PAGE;
2122 	} while (p != heap->main_chunk);
2123 	return count;
2124 }
2125 
2126 static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
2127 {
2128 	zend_long count = 0;
2129 	zend_mm_huge_list *prev = list;
2130 	zend_mm_huge_list *p = list->next;
2131 
2132 	while (p) {
2133 		if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
2134 			prev->next = p->next;
2135 			zend_mm_chunk_free(heap, p->ptr, p->size);
2136 			zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
2137 			count++;
2138 		} else {
2139 			prev = p;
2140 		}
2141 		p = prev->next;
2142 	}
2143 
2144 	return count;
2145 }
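
/*
 * Report all blocks that are still allocated at shutdown. Each leak is
 * reported once per allocation site; the remaining blocks with the same
 * filename/lineno pair are only counted and reported in aggregate via
 * ZMSG_MEMORY_LEAK_REPEATED. Huge blocks are scanned first, then every
 * run in every chunk.
 */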
2146 
2147 static void zend_mm_check_leaks(zend_mm_heap *heap)
2148 {
2149 	zend_mm_huge_list *list;
2150 	zend_mm_chunk *p;
2151 	zend_leak_info leak;
2152 	zend_long repeated = 0;
2153 	uint32_t total = 0;
2154 	uint32_t i, j;
2155 
2156 	/* find leaked huge blocks and free them */
2157 	list = heap->huge_list;
2158 	while (list) {
2159 		zend_mm_huge_list *q = list;
2160 
2161 		leak.addr = list->ptr;
2162 		leak.size = list->dbg.size;
2163 		leak.filename = list->dbg.filename;
2164 		leak.orig_filename = list->dbg.orig_filename;
2165 		leak.lineno = list->dbg.lineno;
2166 		leak.orig_lineno = list->dbg.orig_lineno;
2167 
2168 		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2169 		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2170 		repeated = zend_mm_find_leaks_huge(heap, list);
2171 		total += 1 + repeated;
2172 		if (repeated) {
2173 			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2174 		}
2175 
2176 		heap->huge_list = list = list->next;
2177 		zend_mm_chunk_free(heap, q->ptr, q->size);
2178 		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
2179 	}
2180 
2181 	/* for each chunk */
2182 	p = heap->main_chunk;
2183 	do {
2184 		i = ZEND_MM_FIRST_PAGE;
2185 		while (i < p->free_tail) {
2186 			if (zend_mm_bitset_is_set(p->free_map, i)) {
2187 				if (p->map[i] & ZEND_MM_IS_SRUN) {
2188 					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2189 					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2190 
2191 					j = 0;
2192 					while (j < bin_elements[bin_num]) {
2193 						if (dbg->size != 0) {
2194 							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
2195 							leak.size = dbg->size;
2196 							leak.filename = dbg->filename;
2197 							leak.orig_filename = dbg->orig_filename;
2198 							leak.lineno = dbg->lineno;
2199 							leak.orig_lineno = dbg->orig_lineno;
2200 
2201 							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2202 							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2203 
2204 							dbg->size = 0;
2205 							dbg->filename = NULL;
2206 							dbg->lineno = 0;
2207 
2208 							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
2209 							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
2210 							total += 1 + repeated;
2211 							if (repeated) {
2212 								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2213 							}
2214 						}
2215 						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2216 						j++;
2217 					}
2218 					i += bin_pages[bin_num];
2219 				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2220 					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2221 					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2222 
2223 					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
2224 					leak.size = dbg->size;
2225 					leak.filename = dbg->filename;
2226 					leak.orig_filename = dbg->orig_filename;
2227 					leak.lineno = dbg->lineno;
2228 					leak.orig_lineno = dbg->orig_lineno;
2229 
2230 					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2231 					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2232 
2233 					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2234 
2235 					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
2236 					total += 1 + repeated;
2237 					if (repeated) {
2238 						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(uintptr_t)repeated);
2239 					}
2240 					i += pages_count;
2241 				}
2242 			} else {
2243 				i++;
2244 			}
2245 		}
2246 		p = p->next;
2247 	} while (p != heap->main_chunk);
2248 	if (total) {
2249 		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
2250 	}
2251 }
2252 #endif
2253 
2254 #if ZEND_MM_CUSTOM
2255 static void *tracked_malloc(size_t size);
2256 static void tracked_free_all(void);
2257 #endif
2258 
2259 void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2260 {
2261 	zend_mm_chunk *p;
2262 	zend_mm_huge_list *list;
2263 
2264 #if ZEND_MM_CUSTOM
2265 	if (heap->use_custom_heap) {
2266 		if (heap->custom_heap.std._malloc == tracked_malloc) {
2267 			if (silent) {
2268 				tracked_free_all();
2269 			}
2270 			zend_hash_clean(heap->tracked_allocs);
2271 			if (full) {
2272 				zend_hash_destroy(heap->tracked_allocs);
2273 				free(heap->tracked_allocs);
2274 				/* Make sure the heap free below does not use tracked_free(). */
2275 				heap->custom_heap.std._free = free;
2276 			}
2277 			heap->size = 0;
2278 		}
2279 
2280 		if (full) {
2281 			if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2282 				heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2283 			} else {
2284 				heap->custom_heap.std._free(heap);
2285 			}
2286 		}
2287 		return;
2288 	}
2289 #endif
2290 
2291 #if ZEND_DEBUG
2292 	if (!silent) {
2293 		char *tmp = getenv("ZEND_ALLOC_PRINT_LEAKS");
2294 		if (!tmp || ZEND_ATOL(tmp)) {
2295 			zend_mm_check_leaks(heap);
2296 		}
2297 	}
2298 #endif
2299 
2300 	/* free huge blocks */
2301 	list = heap->huge_list;
2302 	heap->huge_list = NULL;
2303 	while (list) {
2304 		zend_mm_huge_list *q = list;
2305 		list = list->next;
2306 		zend_mm_chunk_free(heap, q->ptr, q->size);
2307 	}
2308 
2309 	/* move all chunks except the first one into the cache */
2310 	p = heap->main_chunk->next;
2311 	while (p != heap->main_chunk) {
2312 		zend_mm_chunk *q = p->next;
2313 		p->next = heap->cached_chunks;
2314 		heap->cached_chunks = p;
2315 		p = q;
2316 		heap->chunks_count--;
2317 		heap->cached_chunks_count++;
2318 	}
2319 
2320 	if (full) {
2321 		/* free all cached chunks */
2322 		while (heap->cached_chunks) {
2323 			p = heap->cached_chunks;
2324 			heap->cached_chunks = p->next;
2325 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2326 		}
2327 		/* free the first chunk */
2328 		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2329 	} else {
2330 		/* free some cached chunks to keep average count */
2331 		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
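		/* e.g. a previous average of 1.0 and a peak of 3 chunks gives a new
		 * average of 2.0, so the loop below frees cached chunks until only
		 * one remains (1 + 0.9 < 2.0) */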
2332 		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2333 		       heap->cached_chunks) {
2334 			p = heap->cached_chunks;
2335 			heap->cached_chunks = p->next;
2336 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2337 			heap->cached_chunks_count--;
2338 		}
2339 		/* clear cached chunks */
2340 		p = heap->cached_chunks;
2341 		while (p != NULL) {
2342 			zend_mm_chunk *q = p->next;
2343 			memset(p, 0, sizeof(zend_mm_chunk));
2344 			p->next = q;
2345 			p = q;
2346 		}
2347 
2348 		/* reinitialize the first chunk and heap */
2349 		p = heap->main_chunk;
2350 		p->heap = &p->heap_slot;
2351 		p->next = p;
2352 		p->prev = p;
2353 		p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2354 		p->free_tail = ZEND_MM_FIRST_PAGE;
2355 		p->num = 0;
2356 
2357 #if ZEND_MM_STAT
2358 		heap->size = heap->peak = 0;
2359 #endif
2360 		memset(heap->free_slot, 0, sizeof(heap->free_slot));
2361 #if ZEND_MM_STAT || ZEND_MM_LIMIT
2362 		heap->real_size = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2363 #endif
2364 #if ZEND_MM_STAT
2365 		heap->real_peak = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
2366 #endif
2367 		heap->chunks_count = 1;
2368 		heap->peak_chunks_count = 1;
2369 		heap->last_chunks_delete_boundary = 0;
2370 		heap->last_chunks_delete_count = 0;
2371 
2372 		memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
2373 		p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2374 		p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2375 	}
2376 }
2377 
2378 /**************/
2379 /* PUBLIC API */
2380 /**************/
2381 
2382 ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2383 {
2384 	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2385 }
2386 
2387 ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2388 {
2389 	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2390 }
2391 
2392 void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2393 {
2394 	return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2395 }
2396 
2397 void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2398 {
2399 	return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2400 }
2401 
2402 ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2403 {
2404 #if ZEND_MM_CUSTOM
2405 	if (UNEXPECTED(heap->use_custom_heap)) {
2406 		if (heap->custom_heap.std._malloc == tracked_malloc) {
2407 			zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2408 			zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2409 			if (size_zv) {
2410 				return Z_LVAL_P(size_zv);
2411 			}
2412 		}
2413 		return 0;
2414 	}
2415 #endif
2416 	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2417 }
2418 
2419 /**********************/
2420 /* Allocation Manager */
2421 /**********************/
2422 
2423 typedef struct _zend_alloc_globals {
2424 	zend_mm_heap *mm_heap;
2425 } zend_alloc_globals;
2426 
2427 #ifdef ZTS
2428 static int alloc_globals_id;
2429 static size_t alloc_globals_offset;
2430 # define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
2431 #else
2432 # define AG(v) (alloc_globals.v)
2433 static zend_alloc_globals alloc_globals;
2434 #endif
2435 
2436 ZEND_API bool is_zend_mm(void)
2437 {
2438 #if ZEND_MM_CUSTOM
2439 	return !AG(mm_heap)->use_custom_heap;
2440 #else
2441 	return 1;
2442 #endif
2443 }
2444 
2445 ZEND_API bool is_zend_ptr(const void *ptr)
2446 {
2447 #if ZEND_MM_CUSTOM
2448 	if (AG(mm_heap)->use_custom_heap) {
2449 		if (AG(mm_heap)->custom_heap.std._malloc == tracked_malloc) {
2450 			zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2451 			zval *size_zv = zend_hash_index_find(AG(mm_heap)->tracked_allocs, h);
2452 			if (size_zv) {
2453 				return 1;
2454 			}
2455 		}
2456 		return 0;
2457 	}
2458 #endif
2459 
2460 	if (AG(mm_heap)->main_chunk) {
2461 		zend_mm_chunk *chunk = AG(mm_heap)->main_chunk;
2462 
2463 		do {
2464 			if (ptr >= (void*)chunk
2465 			 && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
2466 				return 1;
2467 			}
2468 			chunk = chunk->next;
2469 		} while (chunk != AG(mm_heap)->main_chunk);
2470 	}
2471 
2472 	zend_mm_huge_list *block = AG(mm_heap)->huge_list;
2473 	while (block) {
2474 		if (ptr >= block->ptr
2475 				&& ptr < (void*)((char*)block->ptr + block->size)) {
2476 			return 1;
2477 		}
2478 		block = block->next;
2479 	}
2480 
2481 	return 0;
2482 }
2483 
2484 #if ZEND_MM_CUSTOM
2485 
2486 static ZEND_COLD void* ZEND_FASTCALL _malloc_custom(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2487 {
2488 	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2489 		return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2490 	} else {
2491 		return AG(mm_heap)->custom_heap.std._malloc(size);
2492 	}
2493 }
2494 
2495 static ZEND_COLD void ZEND_FASTCALL _efree_custom(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2496 {
2497 	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2498 		AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2499 	} else {
2500 		AG(mm_heap)->custom_heap.std._free(ptr);
2501 	}
2502 }
2503 
2504 static ZEND_COLD void* ZEND_FASTCALL _realloc_custom(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2505 {
2506 	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2507 		return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2508 	} else {
2509 		return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2510 	}
2511 }
2512 #endif
2513 
2514 #if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
2515 #undef _emalloc
2516 
2517 #if ZEND_MM_CUSTOM
2518 # define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2519 		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2520 			return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2521 		} \
2522 	} while (0)
2523 # define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2524 		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2525 			_efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2526 			return; \
2527 		} \
2528 	} while (0)
2529 #else
2530 # define ZEND_MM_CUSTOM_ALLOCATOR(size)
2531 # define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2532 #endif
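
/* _ZEND_BIN_ALLOCATOR expands once per bin via ZEND_MM_BINS_INFO. As a
 * sketch (assuming the first bin is the 8-byte one, bin number 0, and a
 * non-debug build where the file/line relay macros are empty), the
 * generated allocator looks like:
 *
 *   ZEND_API void* ZEND_FASTCALL _emalloc_8(void) {
 *       ZEND_MM_CUSTOM_ALLOCATOR(8);
 *       return zend_mm_alloc_small(AG(mm_heap), 0);
 *   }
 */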
2533 
2534 # define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
2535 	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2536 		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2537 		return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2538 	}
2539 
2540 ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2541 
2542 ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2543 {
2544 	ZEND_MM_CUSTOM_ALLOCATOR(size);
2545 	return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2546 }
2547 
2548 ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2549 {
2550 	ZEND_MM_CUSTOM_ALLOCATOR(size);
2551 	return zend_mm_alloc_huge(AG(mm_heap), size);
2552 }
2553 
2554 #if ZEND_DEBUG
2555 # define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2556 	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2557 		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2558 		{ \
2559 			size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2560 			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2561 			int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2562 			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2563 			ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2564 			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2565 			zend_mm_free_small(AG(mm_heap), ptr, _num); \
2566 		} \
2567 	}
2568 #else
2569 # define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2570 	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2571 		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2572 		{ \
2573 			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2574 			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2575 			zend_mm_free_small(AG(mm_heap), ptr, _num); \
2576 		} \
2577 	}
2578 #endif
2579 
2580 ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2581 
2582 ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2583 {
2584 	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2585 	{
2586 		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2587 		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2588 		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2589 		uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2590 
2591 		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2592 		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2593 		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2594 		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2595 	}
2596 }
2597 
2598 ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2599 {
2600 
2601 	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2602 	zend_mm_free_huge(AG(mm_heap), ptr);
2603 }
2604 #endif
2605 
2606 ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2607 {
2608 #if ZEND_MM_CUSTOM
2609 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2610 		return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2611 	}
2612 #endif
2613 	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2614 }
2615 
2616 ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2617 {
2618 #if ZEND_MM_CUSTOM
2619 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2620 		_efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2621 		return;
2622 	}
2623 #endif
2624 	zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2625 }
2626 
2627 ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2628 {
2629 #if ZEND_MM_CUSTOM
2630 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2631 		return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2632 	}
2633 #endif
2634 	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2635 }
2636 
2637 ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2638 {
2639 #if ZEND_MM_CUSTOM
2640 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2641 		return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2642 	}
2643 #endif
2644 	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2645 }
2646 
2647 ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2648 {
2649 	return _zend_mm_block_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2650 }
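
/*
 * The _safe_* variants compute nmemb * size + offset with overflow checking:
 * zend_safe_address_guarded() raises a fatal error instead of letting the
 * multiplication wrap. E.g. _safe_emalloc(count, sizeof(zval), 0) is the
 * overflow-safe form of emalloc(count * sizeof(zval)).
 */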
2651 
2652 ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2653 {
2654 	return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2655 }
2656 
2657 ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2658 {
2659 	return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
2660 }
2661 
2662 ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2663 {
2664 	return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2665 }
2666 
2667 ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2668 {
2669 	return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
2670 }
2671 
2672 ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2673 {
2674 	void *p;
2675 
2676 	size = zend_safe_address_guarded(nmemb, size, 0);
2677 	p = _emalloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2678 	memset(p, 0, size);
2679 	return p;
2680 }
2681 
2682 ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2683 {
2684 	size_t length;
2685 	char *p;
2686 
2687 	length = strlen(s);
2688 	if (UNEXPECTED(length + 1 == 0)) {
2689 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2690 	}
2691 	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2692 	memcpy(p, s, length+1);
2693 	return p;
2694 }
2695 
2696 ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2697 {
2698 	char *p;
2699 
2700 	if (UNEXPECTED(length + 1 == 0)) {
2701 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2702 	}
2703 	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2704 	memcpy(p, s, length);
2705 	p[length] = 0;
2706 	return p;
2707 }
2708 
2709 static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void);
2710 
2711 ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2712 {
2713 	char *p;
2714 
2715 	if (UNEXPECTED(length + 1 == 0)) {
2716 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
2717 	}
2718 	p = (char *) malloc(length + 1);
2719 	if (UNEXPECTED(p == NULL)) {
2720 		zend_out_of_memory();
2721 	}
2722 	if (EXPECTED(length)) {
2723 		memcpy(p, s, length);
2724 	}
2725 	p[length] = 0;
2726 	return p;
2727 }
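
/*
 * Lowering the limit below the currently mapped size only succeeds when the
 * excess can be covered by releasing cached (unused) chunks; otherwise the
 * limit is left unchanged and FAILURE is returned.
 */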
2728 
2729 ZEND_API zend_result zend_set_memory_limit(size_t memory_limit)
2730 {
2731 #if ZEND_MM_LIMIT
2732 	zend_mm_heap *heap = AG(mm_heap);
2733 
2734 	if (UNEXPECTED(memory_limit < heap->real_size)) {
2735 		if (memory_limit >= heap->real_size - heap->cached_chunks_count * ZEND_MM_CHUNK_SIZE) {
2736 			/* free some cached chunks to fit into new memory limit */
2737 			do {
2738 				zend_mm_chunk *p = heap->cached_chunks;
2739 				heap->cached_chunks = p->next;
2740 				zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2741 				heap->cached_chunks_count--;
2742 				heap->real_size -= ZEND_MM_CHUNK_SIZE;
2743 			} while (memory_limit < heap->real_size);
2744 			return SUCCESS;
2745 		}
2746 		return FAILURE;
2747 	}
2748 	AG(mm_heap)->limit = memory_limit;
2749 #endif
2750 	return SUCCESS;
2751 }
2752 
2753 ZEND_API bool zend_alloc_in_memory_limit_error_reporting(void)
2754 {
2755 #if ZEND_MM_LIMIT
2756 	return AG(mm_heap)->overflow;
2757 #else
2758 	return false;
2759 #endif
2760 }
2761 
2762 ZEND_API size_t zend_memory_usage(bool real_usage)
2763 {
2764 #if ZEND_MM_STAT
2765 	if (real_usage) {
2766 		return AG(mm_heap)->real_size;
2767 	} else {
2768 		size_t usage = AG(mm_heap)->size;
2769 		return usage;
2770 	}
2771 #endif
2772 	return 0;
2773 }
2774 
2775 ZEND_API size_t zend_memory_peak_usage(bool real_usage)
2776 {
2777 #if ZEND_MM_STAT
2778 	if (real_usage) {
2779 		return AG(mm_heap)->real_peak;
2780 	} else {
2781 		return AG(mm_heap)->peak;
2782 	}
2783 #endif
2784 	return 0;
2785 }
2786 
2787 ZEND_API void zend_memory_reset_peak_usage(void)
2788 {
2789 #if ZEND_MM_STAT
2790 	AG(mm_heap)->real_peak = AG(mm_heap)->real_size;
2791 	AG(mm_heap)->peak = AG(mm_heap)->size;
2792 #endif
2793 }
2794 
2795 ZEND_API void shutdown_memory_manager(bool silent, bool full_shutdown)
2796 {
2797 	zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2798 }
2799 
2800 static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
2801 {
2802 	fprintf(stderr, "Out of memory\n");
2803 	exit(1);
2804 }
2805 
2806 #if ZEND_MM_CUSTOM
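/* Tracked allocations are keyed by address: malloc'd pointers are at least
 * ZEND_MM_ALIGNMENT-aligned, so shifting them right by
 * ZEND_MM_ALIGNMENT_LOG2 loses no information and yields an integer hash
 * key. The shift is reversible, which is what lets tracked_free_all()
 * recover the original pointer from the key alone. */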
2807 static zend_always_inline void tracked_add(zend_mm_heap *heap, void *ptr, size_t size) {
2808 	zval size_zv;
2809 	zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2810 	ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
2811 	ZVAL_LONG(&size_zv, size);
2812 	zend_hash_index_add_new(heap->tracked_allocs, h, &size_zv);
2813 }
2814 
2815 static zend_always_inline zval *tracked_get_size_zv(zend_mm_heap *heap, void *ptr) {
2816 	zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
2817 	zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
2818 	ZEND_ASSERT(size_zv && "Trying to free pointer not allocated through ZendMM");
2819 	return size_zv;
2820 }
2821 
2822 static zend_always_inline void tracked_check_limit(zend_mm_heap *heap, size_t add_size) {
2823 	if (add_size > heap->limit - heap->size && !heap->overflow) {
2824 #if ZEND_DEBUG
2825 		zend_mm_safe_error(heap,
2826 			"Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)",
2827 			heap->limit, "file", 0, add_size);
2828 #else
2829 		zend_mm_safe_error(heap,
2830 			"Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)",
2831 			heap->limit, add_size);
2832 #endif
2833 	}
2834 }
2835 
2836 static void *tracked_malloc(size_t size)
2837 {
2838 	zend_mm_heap *heap = AG(mm_heap);
2839 	tracked_check_limit(heap, size);
2840 
2841 	void *ptr = malloc(size);
2842 	if (!ptr) {
2843 		zend_out_of_memory();
2844 	}
2845 
2846 	tracked_add(heap, ptr, size);
2847 	heap->size += size;
2848 	return ptr;
2849 }
2850 
2851 static void tracked_free(void *ptr) {
2852 	if (!ptr) {
2853 		return;
2854 	}
2855 
2856 	zend_mm_heap *heap = AG(mm_heap);
2857 	zval *size_zv = tracked_get_size_zv(heap, ptr);
2858 	heap->size -= Z_LVAL_P(size_zv);
2859 	zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) size_zv);
2860 	free(ptr);
2861 }
2862 
2863 static void *tracked_realloc(void *ptr, size_t new_size) {
2864 	zend_mm_heap *heap = AG(mm_heap);
2865 	zval *old_size_zv = NULL;
2866 	size_t old_size = 0;
2867 	if (ptr) {
2868 		old_size_zv = tracked_get_size_zv(heap, ptr);
2869 		old_size = Z_LVAL_P(old_size_zv);
2870 	}
2871 
2872 	if (new_size > old_size) {
2873 		tracked_check_limit(heap, new_size - old_size);
2874 	}
2875 
2876 	/* Delete information about old allocation only after checking the memory limit. */
2877 	if (old_size_zv) {
2878 		zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) old_size_zv);
2879 	}
2880 
2881 	ptr = __zend_realloc(ptr, new_size);
2882 	tracked_add(heap, ptr, new_size);
2883 	heap->size += new_size - old_size;
2884 	return ptr;
2885 }
2886 
2887 static void tracked_free_all(void) {
2888 	HashTable *tracked_allocs = AG(mm_heap)->tracked_allocs;
2889 	zend_ulong h;
2890 	ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
2891 		void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
2892 		free(ptr);
2893 	} ZEND_HASH_FOREACH_END();
2894 }
2895 #endif
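
/*
 * Heap selection is controlled by environment variables at startup:
 * USE_ZEND_ALLOC=0 bypasses ZendMM in favor of the system allocator
 * (commonly used to run PHP under valgrind), USE_TRACKED_ALLOC=1
 * additionally records every allocation so that it can be freed
 * automatically at shutdown, and USE_ZEND_ALLOC_HUGE_PAGES=1 enables the
 * use of huge pages.
 */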
2896 
2897 static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
2898 {
2899 	char *tmp;
2900 
2901 #if ZEND_MM_CUSTOM
2902 	tmp = getenv("USE_ZEND_ALLOC");
2903 	if (tmp && !ZEND_ATOL(tmp)) {
2904 		bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && ZEND_ATOL(tmp);
2905 		zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2906 		memset(mm_heap, 0, sizeof(zend_mm_heap));
2907 		mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2908 		mm_heap->limit = (size_t)Z_L(-1) >> 1;
2909 		mm_heap->overflow = 0;
2910 
2911 		if (!tracked) {
2912 			/* Use system allocator. */
2913 			mm_heap->custom_heap.std._malloc = __zend_malloc;
2914 			mm_heap->custom_heap.std._free = free;
2915 			mm_heap->custom_heap.std._realloc = __zend_realloc;
2916 		} else {
2917 			/* Use system allocator and track allocations for auto-free. */
2918 			mm_heap->custom_heap.std._malloc = tracked_malloc;
2919 			mm_heap->custom_heap.std._free = tracked_free;
2920 			mm_heap->custom_heap.std._realloc = tracked_realloc;
2921 			mm_heap->tracked_allocs = malloc(sizeof(HashTable));
2922 			zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
2923 		}
2924 		return;
2925 	}
2926 #endif
2927 
2928 	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
2929 	if (tmp && ZEND_ATOL(tmp)) {
2930 		zend_mm_use_huge_pages = true;
2931 	}
2932 	alloc_globals->mm_heap = zend_mm_init();
2933 }
2934 
2935 #ifdef ZTS
2936 static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
2937 {
2938 	zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
2939 }
2940 #endif
2941 
2942 ZEND_API void start_memory_manager(void)
2943 {
2944 #ifdef ZTS
2945 	ts_allocate_fast_id(&alloc_globals_id, &alloc_globals_offset, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
2946 #else
2947 	alloc_globals_ctor(&alloc_globals);
2948 #endif
2949 #ifndef _WIN32
2950 #  if defined(_SC_PAGESIZE)
2951 	REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
2952 #  elif defined(_SC_PAGE_SIZE)
2953 	REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
2954 #  endif
2955 #endif
2956 }
2957 
2958 ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
2959 {
2960 	zend_mm_heap *old_heap;
2961 
2962 	old_heap = AG(mm_heap);
2963 	AG(mm_heap) = (zend_mm_heap*)new_heap;
2964 	return (zend_mm_heap*)old_heap;
2965 }
2966 
2967 ZEND_API zend_mm_heap *zend_mm_get_heap(void)
2968 {
2969 	return AG(mm_heap);
2970 }
2971 
2972 ZEND_API bool zend_mm_is_custom_heap(zend_mm_heap *new_heap)
2973 {
2974 #if ZEND_MM_CUSTOM
2975 	return AG(mm_heap)->use_custom_heap;
2976 #else
2977 	return 0;
2978 #endif
2979 }
2980 
2981 ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2982                                           void* (*_malloc)(size_t),
2983                                           void  (*_free)(void*),
2984                                           void* (*_realloc)(void*, size_t))
2985 {
2986 #if ZEND_MM_CUSTOM
2987 	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2988 
2989 	if (!_malloc && !_free && !_realloc) {
2990 		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
2991 	} else {
2992 		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2993 		_heap->custom_heap.std._malloc = _malloc;
2994 		_heap->custom_heap.std._free = _free;
2995 		_heap->custom_heap.std._realloc = _realloc;
2996 	}
2997 #endif
2998 }
2999 
3000 ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
3001                                           void* (**_malloc)(size_t),
3002                                           void  (**_free)(void*),
3003                                           void* (**_realloc)(void*, size_t))
3004 {
3005 #if ZEND_MM_CUSTOM
3006 	zend_mm_heap *_heap = (zend_mm_heap*)heap;
3007 
3008 	if (heap->use_custom_heap) {
3009 		*_malloc = _heap->custom_heap.std._malloc;
3010 		*_free = _heap->custom_heap.std._free;
3011 		*_realloc = _heap->custom_heap.std._realloc;
3012 	} else {
3013 		*_malloc = NULL;
3014 		*_free = NULL;
3015 		*_realloc = NULL;
3016 	}
3017 #else
3018 	*_malloc = NULL;
3019 	*_free = NULL;
3020 	*_realloc = NULL;
3021 #endif
3022 }
3023 
3024 #if ZEND_DEBUG
3025 ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
3026                                                 void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3027                                                 void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
3028                                                 void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
3029 {
3030 #if ZEND_MM_CUSTOM
3031 	zend_mm_heap *_heap = (zend_mm_heap*)heap;
3032 
3033 	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
3034 	_heap->custom_heap.debug._malloc = _malloc;
3035 	_heap->custom_heap.debug._free = _free;
3036 	_heap->custom_heap.debug._realloc = _realloc;
3037 #endif
3038 }
3039 #endif
3040 
3041 ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
3042 {
3043 #if ZEND_MM_STORAGE
3044 	return heap->storage;
3045 #else
3046 	return NULL;
3047 #endif
3048 }
3049 
3050 ZEND_API zend_mm_heap *zend_mm_startup(void)
3051 {
3052 	return zend_mm_init();
3053 }
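
/*
 * A minimal sketch of starting a heap on top of a custom chunk storage
 * backend (hypothetical handler names; the truncate/extend hooks are
 * assumed optional and left NULL here):
 *
 *   static void *my_chunk_alloc(zend_mm_storage *storage, size_t size, size_t alignment);
 *   static void  my_chunk_free(zend_mm_storage *storage, void *chunk, size_t size);
 *
 *   zend_mm_handlers handlers = {
 *       .chunk_alloc = my_chunk_alloc,
 *       .chunk_free  = my_chunk_free,
 *   };
 *   zend_mm_heap *heap = zend_mm_startup_ex(&handlers, NULL, 0);
 */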
3054 
3055 ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
3056 {
3057 #if ZEND_MM_STORAGE
3058 	zend_mm_storage tmp_storage, *storage;
3059 	zend_mm_chunk *chunk;
3060 	zend_mm_heap *heap;
3061 
3062 	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
3063 	tmp_storage.data = data;
3064 	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
3065 	if (UNEXPECTED(chunk == NULL)) {
3066 #if ZEND_MM_ERROR
3067 		fprintf(stderr, "Can't initialize heap\n");
3068 #endif
3069 		return NULL;
3070 	}
3071 	heap = &chunk->heap_slot;
3072 	chunk->heap = heap;
3073 	chunk->next = chunk;
3074 	chunk->prev = chunk;
3075 	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
3076 	chunk->free_tail = ZEND_MM_FIRST_PAGE;
3077 	chunk->num = 0;
3078 	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
3079 	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
3080 	heap->main_chunk = chunk;
3081 	heap->cached_chunks = NULL;
3082 	heap->chunks_count = 1;
3083 	heap->peak_chunks_count = 1;
3084 	heap->cached_chunks_count = 0;
3085 	heap->avg_chunks_count = 1.0;
3086 	heap->last_chunks_delete_boundary = 0;
3087 	heap->last_chunks_delete_count = 0;
3088 #if ZEND_MM_STAT || ZEND_MM_LIMIT
3089 	heap->real_size = ZEND_MM_CHUNK_SIZE;
3090 #endif
3091 #if ZEND_MM_STAT
3092 	heap->real_peak = ZEND_MM_CHUNK_SIZE;
3093 	heap->size = 0;
3094 	heap->peak = 0;
3095 #endif
3096 #if ZEND_MM_LIMIT
3097 	heap->limit = (size_t)Z_L(-1) >> 1;
3098 	heap->overflow = 0;
3099 #endif
3100 #if ZEND_MM_CUSTOM
3101 	heap->use_custom_heap = 0;
3102 #endif
3103 	heap->storage = &tmp_storage;
3104 	heap->huge_list = NULL;
3105 	memset(heap->free_slot, 0, sizeof(heap->free_slot));
3106 	storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
3107 	if (!storage) {
3108 		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
3109 #if ZEND_MM_ERROR
3110 		fprintf(stderr, "Can't initialize heap\n");
3111 #endif
3112 		return NULL;
3113 	}
3114 	memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
3115 	if (data) {
3116 		storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
3117 		memcpy(storage->data, data, data_size);
3118 	}
3119 	heap->storage = storage;
3120 	return heap;
3121 #else
3122 	return NULL;
3123 #endif
3124 }
3125 
3126 ZEND_API void * __zend_malloc(size_t len)
3127 {
3128 	void *tmp = malloc(len);
3129 	if (EXPECTED(tmp || !len)) {
3130 		return tmp;
3131 	}
3132 	zend_out_of_memory();
3133 }
3134 
3135 ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
3136 {
3137 	void *tmp;
3138 
3139 	len = zend_safe_address_guarded(nmemb, len, 0);
3140 	tmp = __zend_malloc(len);
3141 	memset(tmp, 0, len);
3142 	return tmp;
3143 }
3144 
3145 ZEND_API void * __zend_realloc(void *p, size_t len)
3146 {
3147 	p = realloc(p, len);
3148 	if (EXPECTED(p || !len)) {
3149 		return p;
3150 	}
3151 	zend_out_of_memory();
3152 }
3153 
3154 ZEND_API char * __zend_strdup(const char *s)
3155 {
3156 	char *tmp = strdup(s);
3157 	if (EXPECTED(tmp)) {
3158 		return tmp;
3159 	}
3160 	zend_out_of_memory();
3161 }
3162 
3163 #ifdef ZTS
3164 size_t zend_mm_globals_size(void)
3165 {
3166 	return sizeof(zend_alloc_globals);
3167 }
3168 #endif
3169