xref: /PHP-7.0/Zend/zend_alloc.c (revision 397f5cb6)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend Engine                                                          |
4    +----------------------------------------------------------------------+
5    | Copyright (c) 1998-2017 Zend Technologies Ltd. (http://www.zend.com) |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 2.00 of the Zend license,     |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | http://www.zend.com/license/2_00.txt.                                |
11    | If you did not receive a copy of the Zend license and are unable to  |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@zend.com so we can mail you a copy immediately.              |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@zend.com>                                |
16    |          Zeev Suraski <zeev@zend.com>                                |
17    |          Dmitry Stogov <dmitry@zend.com>                             |
18    +----------------------------------------------------------------------+
19 */
20 
21 /* $Id$ */
22 
23 /*
24  * zend_alloc is designed to be a modern CPU cache friendly memory manager
25  * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
26  *
27  * All allocations are split into 3 categories:
28  *
29  * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
30  *         performed using mmap(). The result is aligned on 2M boundary.
31  *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on page boundary.
34  *
35  * Small - less than 3/4 of page size. Small sizes are rounded up to nearest
36  *         greater predefined small size (there are 30 predefined sizes:
37  *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
38  *         RUNs. Each RUN is allocated as a single or few following pages.
39  *         Allocation inside RUNs implemented using linked list of free
40  *         elements. The result is aligned to 8 bytes.
41  *
42  * zend_alloc allocates memory from OS by CHUNKs, these CHUNKs and huge memory
43  * blocks are always aligned to CHUNK boundary. So it's very easy to determine
44  * the CHUNK owning the certain pointer. Regular CHUNKs reserve a single
45  * page at start for special purpose. It contains bitset of free pages,
46  * few bitset for available runs of predefined small sizes, map of pages that
47  * keeps information about usage of each page in this CHUNK, etc.
48  *
49  * zend_alloc provides familiar emalloc/efree/erealloc API, but in addition it
50  * provides specialized and optimized routines to allocate blocks of predefined
 * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc)
52  * The library uses C preprocessor tricks that substitute calls to emalloc()
53  * with more specialized routines when the requested size is known.
54  */
55 
56 #include "zend.h"
57 #include "zend_alloc.h"
58 #include "zend_globals.h"
59 #include "zend_operators.h"
60 #include "zend_multiply.h"
61 
62 #ifdef HAVE_SIGNAL_H
63 # include <signal.h>
64 #endif
65 #ifdef HAVE_UNISTD_H
66 # include <unistd.h>
67 #endif
68 
69 #ifdef ZEND_WIN32
70 # include <wincrypt.h>
71 # include <process.h>
72 #endif
73 
74 #include <stdio.h>
75 #include <stdlib.h>
76 #include <string.h>
77 
78 #include <sys/types.h>
79 #include <sys/stat.h>
80 #if HAVE_LIMITS_H
81 #include <limits.h>
82 #endif
83 #include <fcntl.h>
84 #include <errno.h>
85 
86 #ifndef _WIN32
87 # ifdef HAVE_MREMAP
88 #  ifndef _GNU_SOURCE
89 #   define _GNU_SOURCE
90 #  endif
91 #  ifndef __USE_GNU
92 #   define __USE_GNU
93 #  endif
94 # endif
95 # include <sys/mman.h>
96 # ifndef MAP_ANON
97 #  ifdef MAP_ANONYMOUS
98 #   define MAP_ANON MAP_ANONYMOUS
99 #  endif
100 # endif
101 # ifndef MREMAP_MAYMOVE
102 #  define MREMAP_MAYMOVE 0
103 # endif
104 # ifndef MAP_FAILED
105 #  define MAP_FAILED ((void*)-1)
106 # endif
107 # ifndef MAP_POPULATE
108 #  define MAP_POPULATE 0
109 # endif
110 #  if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
111 #    define REAL_PAGE_SIZE _real_page_size
112 static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
113 #  endif
114 #endif
115 
116 #ifndef REAL_PAGE_SIZE
117 # define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
118 #endif
119 
120 #ifndef ZEND_MM_STAT
121 # define ZEND_MM_STAT 1    /* track current and peak memory usage            */
122 #endif
123 #ifndef ZEND_MM_LIMIT
124 # define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
125 #endif
126 #ifndef ZEND_MM_CUSTOM
127 # define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
128                            /* USE_ZEND_ALLOC=0 may switch to system malloc() */
129 #endif
130 #ifndef ZEND_MM_STORAGE
131 # define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
132 #endif
133 #ifndef ZEND_MM_ERROR
134 # define ZEND_MM_ERROR 1   /* report system errors                           */
135 #endif
136 
137 #ifndef ZEND_MM_CHECK
138 # define ZEND_MM_CHECK(condition, message)  do { \
139 		if (UNEXPECTED(!(condition))) { \
140 			zend_mm_panic(message); \
141 		} \
142 	} while (0)
143 #endif
144 
145 typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
146 typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
147 
148 #define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
149 	(((size_t)(size)) & ((alignment) - 1))
150 #define ZEND_MM_ALIGNED_BASE(size, alignment) \
151 	(((size_t)(size)) & ~((alignment) - 1))
152 #define ZEND_MM_SIZE_TO_NUM(size, alignment) \
153 	(((size_t)(size) + ((alignment) - 1)) / (alignment))
154 
155 #define ZEND_MM_BITSET_LEN		(sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
156 #define ZEND_MM_PAGE_MAP_LEN	(ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
157 
158 typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
159 
160 #define ZEND_MM_IS_FRUN                  0x00000000
161 #define ZEND_MM_IS_LRUN                  0x40000000
162 #define ZEND_MM_IS_SRUN                  0x80000000
163 
164 #define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
165 #define ZEND_MM_LRUN_PAGES_OFFSET        0
166 
167 #define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
168 #define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
169 
170 #define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
171 #define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
172 
173 #define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
174 #define ZEND_MM_NRUN_OFFSET_OFFSET       16
175 
176 #define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
177 #define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
178 #define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
179 #define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
180 
181 #define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
182 #define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
183 #define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
184 #define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
185 #define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
186 
187 #define ZEND_MM_BINS 30
188 
189 typedef struct  _zend_mm_page      zend_mm_page;
190 typedef struct  _zend_mm_bin       zend_mm_bin;
191 typedef struct  _zend_mm_free_slot zend_mm_free_slot;
192 typedef struct  _zend_mm_chunk     zend_mm_chunk;
193 typedef struct  _zend_mm_huge_list zend_mm_huge_list;
194 
195 #ifdef _WIN64
196 # define PTR_FMT "0x%0.16I64x"
197 #elif SIZEOF_LONG == 8
198 # define PTR_FMT "0x%0.16lx"
199 #else
200 # define PTR_FMT "0x%0.8lx"
201 #endif
202 
203 #ifdef MAP_HUGETLB
204 int zend_mm_use_huge_pages = 0;
205 #endif
206 
207 /*
 * Memory is retrieved from OS by chunks of fixed size 2MB.
209  * Inside chunk it's managed by pages of fixed size 4096B.
 * So each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
212  * It contains service information about all pages.
213  *
214  * free_pages - current number of free pages in this chunk
215  *
216  * free_tail  - number of continuous free pages at the end of chunk
217  *
218  * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. Allocator for "large sizes" may easily find a
220  *              free page (or a continuous number of pages) searching for zero
221  *              bits.
222  *
223  * map        - contains service information for each page. (32-bits for each
224  *              page).
225  *    usage:
226  *				(2 bits)
227  * 				FRUN - free page,
228  *              LRUN - first page of "large" allocation
229  *              SRUN - first page of a bin used for "small" allocation
230  *
231  *    lrun_pages:
232  *              (10 bits) number of allocated pages
233  *
234  *    srun_bin_num:
235  *              (5 bits) bin number (e.g. 0 for sizes 0-2, 1 for 3-4,
236  *               2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
237  */
238 
/* Per-request heap descriptor.  It is embedded in the first (main) chunk's
 * heap_slot, so it lives and dies with that chunk. */
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
	int                use_custom_heap;         /* non-zero when the custom_heap union below is in use */
#endif
#if ZEND_MM_STORAGE
	zend_mm_storage   *storage;                 /* pluggable chunk storage backend (NULL => direct mmap) */
#endif
#if ZEND_MM_STAT
	size_t             size;                    /* current memory usage */
	size_t             peak;                    /* peak memory usage */
#endif
	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	size_t             real_size;               /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
	size_t             real_peak;               /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
	size_t             limit;                   /* memory limit */
	int                overflow;                /* memory overflow flag */
#endif

	zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */

	zend_mm_chunk     *main_chunk;              /* head of the circular chunk list */
	zend_mm_chunk     *cached_chunks;			/* list of unused chunks */
	int                chunks_count;			/* number of allocated chunks */
	int                peak_chunks_count;		/* peak number of allocated chunks for current request */
	int                cached_chunks_count;		/* number of cached chunks */
	double             avg_chunks_count;		/* average number of chunks allocated per request */
	int                last_chunks_delete_boundary; /* number of chunks after last deletion */
	int                last_chunks_delete_count;    /* number of deletions over the last boundary */
#if ZEND_MM_CUSTOM
	union {
		/* user-installed allocator entry points (release builds) */
		struct {
			void      *(*_malloc)(size_t);
			void       (*_free)(void*);
			void      *(*_realloc)(void*, size_t);
		} std;
		/* same entry points carrying file/line information for debug builds */
		struct {
			void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
		} debug;
	} custom_heap;
#endif
};
287 
/* Header stored in the first page of every 2MB chunk.
 * Chunks form a circular doubly-linked list rooted at heap->main_chunk. */
struct _zend_mm_chunk {
	zend_mm_heap      *heap;                    /* owning heap (chunks are CHUNK-aligned, so any
	                                             * pointer can be mapped back to its chunk/heap) */
	zend_mm_chunk     *next;
	zend_mm_chunk     *prev;
	int                free_pages;				/* number of free pages */
	int                free_tail;               /* number of free pages at the end of chunk */
	int                num;                     /* sequence number; younger chunks get bigger numbers */
	char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)]; /* pad header to 64 bytes */
	zend_mm_heap       heap_slot;               /* used only in main chunk */
	zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
	zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
};
300 
/* A raw page (ZEND_MM_PAGE_SIZE bytes); exists only to give page-granular
 * pointer arithmetic a type (see ZEND_MM_PAGE_ADDR). */
struct _zend_mm_page {
	char               bytes[ZEND_MM_PAGE_SIZE];
};
304 
305 /*
306  * bin - is one or few continuous pages (up to 8) used for allocation of
307  * a particular "small size".
308  */
struct _zend_mm_bin {
	char               bytes[ZEND_MM_PAGE_SIZE * 8]; /* sized for the largest bin: up to 8 pages */
};
312 
/* Overlay placed on a free small-size element to link it into
 * heap->free_slot[bin]. */
struct _zend_mm_free_slot {
	zend_mm_free_slot *next_free_slot;
};
316 
/* Node of the singly-linked list (heap->huge_list) tracking blocks larger
 * than a chunk, which are mmap'ed individually. */
struct _zend_mm_huge_list {
	void              *ptr;                     /* start of the huge mapping */
	size_t             size;                    /* mapped size in bytes */
	zend_mm_huge_list *next;
#if ZEND_DEBUG
	zend_mm_debug_info dbg;                     /* allocation site info for debug builds */
#endif
};
325 
326 #define ZEND_MM_PAGE_ADDR(chunk, page_num) \
327 	((void*)(((zend_mm_page*)(chunk)) + (page_num)))
328 
/* bin_data_size[i]: element size in bytes served by bin i
 * (the predefined small sizes 8, 16, 24, ... 3072). */
#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const unsigned int bin_data_size[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};
333 
/* bin_elements[i]: how many elements fit in one run of bin i. */
#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const int bin_elements[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};
338 
/* bin_pages[i]: number of contiguous pages forming one run of bin i. */
#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const int bin_pages[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};
343 
344 #if ZEND_DEBUG
zend_debug_alloc_output(char * format,...)345 ZEND_COLD void zend_debug_alloc_output(char *format, ...)
346 {
347 	char output_buf[256];
348 	va_list args;
349 
350 	va_start(args, format);
351 	vsprintf(output_buf, format, args);
352 	va_end(args);
353 
354 #ifdef ZEND_WIN32
355 	OutputDebugString(output_buf);
356 #else
357 	fprintf(stderr, "%s", output_buf);
358 #endif
359 }
360 #endif
361 
/* Print a fatal allocator error to stderr and terminate the process.
 * In debug builds, raise SIGSEGV first so a core dump / debugger break
 * happens at the point of failure. */
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
	fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
	fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
	kill(getpid(), SIGSEGV);
#endif
	exit(1);
}
374 
/* Raise a fatal out-of-memory style error through the regular PHP error
 * machinery.  heap->overflow is set around the call — NOTE(review):
 * presumably so allocations made by the error handler itself are allowed
 * to exceed the limit; confirm against the limit-check sites.
 * zend_error_noreturn() normally bails out on its own; the surrounding
 * zend_try/zend_catch plus the explicit zend_bailout() guarantee this
 * function never returns, and exit(1) is an unreachable safety net. */
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
	const char *format,
	size_t limit,
#if ZEND_DEBUG
	const char *filename,
	uint lineno,
#endif
	size_t size)
{

	heap->overflow = 1;
	zend_try {
		zend_error_noreturn(E_ERROR,
			format,
			limit,
#if ZEND_DEBUG
			filename,
			lineno,
#endif
			size);
	} zend_catch {
	}  zend_end_try();
	heap->overflow = 0;
	zend_bailout();
	exit(1);
}
401 
402 #ifdef _WIN32
403 void
stderr_last_error(char * msg)404 stderr_last_error(char *msg)
405 {
406 	LPSTR buf = NULL;
407 	DWORD err = GetLastError();
408 
409 	if (!FormatMessage(
410 			FORMAT_MESSAGE_ALLOCATE_BUFFER |
411 			FORMAT_MESSAGE_FROM_SYSTEM |
412 			FORMAT_MESSAGE_IGNORE_INSERTS,
413 			NULL,
414 			err,
415 			MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
416 			(LPSTR)&buf,
417 		0, NULL)) {
418 		fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
419 	}
420 	else {
421 		fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
422 	}
423 }
424 #endif
425 
426 /*****************/
427 /* OS Allocation */
428 /*****************/
429 
/* Map "size" bytes at exactly "addr".  Returns NULL on failure or when the
 * kernel placed the mapping somewhere else (in which case the stray mapping
 * is released). */
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	} else if (ptr != addr) {
		/* kernel ignored the address hint — undo and report failure */
		if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		}
		return NULL;
	}
	return ptr;
#endif
}
454 
/* Map "size" bytes of anonymous read/write memory anywhere.
 * Returns NULL on failure (error already reported if ZEND_MM_ERROR). */
static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

	if (ptr == NULL) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualAlloc() failed");
#endif
		return NULL;
	}
	return ptr;
#else
	void *ptr;

#ifdef MAP_HUGETLB
	/* opportunistically try a huge-page mapping for whole chunks;
	 * fall through to a normal mapping if the kernel refuses */
	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (ptr != MAP_FAILED) {
			return ptr;
		}
	}
#endif

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	}
	return ptr;
#endif
}
490 
/* Release a mapping obtained from zend_mm_mmap()/zend_mm_mmap_fixed().
 * Failures are only reported, not propagated — callers cannot recover. */
static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
	/* size must be 0 with MEM_RELEASE; the whole reservation is freed */
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualFree() failed");
#endif
	}
#else
	if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
	}
#endif
}
507 
508 /***********/
509 /* Bitmask */
510 /***********/
511 
512 /* number of trailing set (1) bits */
/* Number of trailing set (1) bits, i.e. the index of the lowest 0 bit.
 * Equivalent to ctz(~bitset).  Callers (the find_zero helpers) only pass
 * words that are not all-ones, so the all-ones edge cases below are
 * effectively unreachable. */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
	return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
	return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, ~bitset)) {
#else
	if (!BitScanForward(&index, ~bitset)) {
#endif
		/* undefined behavior */
		/* NOTE(review): returns 32 even in the _WIN64 branch — harmless
		 * only because callers never pass an all-ones word */
		return 32;
	}

	return (int)index;
#else
	/* portable binary-search fallback */
	int n;

	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

	n = 0;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
	return n + (bitset & 1);
#endif
}
550 
551 /* number of trailing zero bits (0x01 -> 1; 0x40 -> 6; 0x00 -> LEN) */
/* Number of trailing zero bits, i.e. the index of the lowest 1 bit
 * (0x01 -> 0... see examples in the comment above); ZEND_MM_BITSET_LEN
 * for an all-zero word in the portable fallback. */
static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
	return __builtin_ctzl(bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
	return __builtin_ctzll(bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, bitset)) {
#else
	if (!BitScanForward(&index, bitset)) {
#endif
		/* undefined behavior */
		/* NOTE(review): returns 32 even in the _WIN64 branch — callers
		 * never pass an all-zero word, so this path is unreachable */
		return 32;
	}

	return (int)index;
#else
	/* portable binary-search fallback */
	int n;

	if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;

	n = 1;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
	return n - (bitset & 1);
#endif
}
589 
590 static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
591 {
592 	int i = 0;
593 
594 	do {
595 		zend_mm_bitset tmp = bitset[i];
596 		if (tmp != (zend_mm_bitset)-1) {
597 			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
598 		}
599 		i++;
600 	} while (i < size);
601 	return -1;
602 }
603 
604 static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
605 {
606 	int i = 0;
607 
608 	do {
609 		zend_mm_bitset tmp = bitset[i];
610 		if (tmp != 0) {
611 			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
612 		}
613 		i++;
614 	} while (i < size);
615 	return -1;
616 }
617 
618 static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
619 {
620 	int i = 0;
621 
622 	do {
623 		zend_mm_bitset tmp = bitset[i];
624 		if (tmp != (zend_mm_bitset)-1) {
625 			int n = zend_mm_bitset_nts(tmp);
626 			bitset[i] |= Z_UL(1) << n;
627 			return i * ZEND_MM_BITSET_LEN + n;
628 		}
629 		i++;
630 	} while (i < size);
631 	return -1;
632 }
633 
634 static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
635 {
636 	return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
637 }
638 
639 static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
640 {
641 	bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
642 }
643 
644 static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
645 {
646 	bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
647 }
648 
/* Set "len" consecutive bits starting at "start".  The range may span
 * multiple bitset words; partial first/last words are handled with masks. */
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_set_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;      /* word containing first bit */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* word containing last bit */
		int bit = start & (ZEND_MM_BITSET_LEN - 1); /* offset of first bit in its word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			bitset[pos++] |= tmp;
			while (pos != end) {
				/* set all bits */
				bitset[pos++] = (zend_mm_bitset)-1;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		}
	}
}
680 
/* Clear "len" consecutive bits starting at "start"; mirror image of
 * zend_mm_bitset_set_range(). */
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_reset_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;      /* word containing first bit */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* word containing last bit */
		int bit = start & (ZEND_MM_BITSET_LEN - 1); /* offset of first bit in its word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = ~((Z_L(1) << bit) - 1);
			bitset[pos++] &= ~tmp;
			while (pos != end) {
				/* clear all bits (comment previously said "set") */
				bitset[pos++] = 0;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		}
	}
}
712 
/* Return non-zero iff all "len" bits starting at "start" are clear
 * (i.e. the corresponding pages are free).  Read-only counterpart of
 * set_range/reset_range above. */
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		return !zend_mm_bitset_is_set(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;      /* word containing first bit */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* word containing last bit */
		int bit = start & (ZEND_MM_BITSET_LEN - 1); /* offset of first bit in its word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* test bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			if ((bitset[pos++] & tmp) != 0) {
				return 0;
			}
			while (pos != end) {
				/* test whole words in between */
				if (bitset[pos++] != 0) {
					return 0;
				}
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* test bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* test bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		}
	}
}
748 
749 /**********/
750 /* Chunks */
751 /**********/
752 
753 static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
754 {
755 	void *ptr = zend_mm_mmap(size);
756 
757 	if (ptr == NULL) {
758 		return NULL;
759 	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
760 #ifdef MADV_HUGEPAGE
761 	    madvise(ptr, size, MADV_HUGEPAGE);
762 #endif
763 		return ptr;
764 	} else {
765 		size_t offset;
766 
767 		/* chunk has to be aligned */
768 		zend_mm_munmap(ptr, size);
769 		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
770 #ifdef _WIN32
771 		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
772 		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
773 		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
774 		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
775 		if (offset != 0) {
776 			zend_mm_munmap(ptr, size);
777 			return NULL;
778 		}
779 		return ptr;
780 #else
781 		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
782 		if (offset != 0) {
783 			offset = alignment - offset;
784 			zend_mm_munmap(ptr, offset);
785 			ptr = (char*)ptr + offset;
786 			alignment -= offset;
787 		}
788 		if (alignment > REAL_PAGE_SIZE) {
789 			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
790 		}
791 # ifdef MADV_HUGEPAGE
792 	    madvise(ptr, size, MADV_HUGEPAGE);
793 # endif
794 #endif
795 		return ptr;
796 	}
797 }
798 
799 static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
800 {
801 #if ZEND_MM_STORAGE
802 	if (UNEXPECTED(heap->storage)) {
803 		void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
804 		ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
805 		return ptr;
806 	}
807 #endif
808 	return zend_mm_chunk_alloc_int(size, alignment);
809 }
810 
/* Return a chunk to the storage backend when one is installed, otherwise
 * unmap it. */
static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		heap->storage->handlers.chunk_free(heap->storage, addr, size);
		return;
	}
#endif
	zend_mm_munmap(addr, size);
}
821 
/* Shrink an allocated chunk from old_size to new_size in place.
 * Returns 1 on success, 0 when truncation is unsupported (storage backend
 * without a chunk_truncate handler, or Windows where VirtualAlloc regions
 * cannot be partially released). */
static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		if (heap->storage->handlers.chunk_truncate) {
			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifndef _WIN32
	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
	return 1;
#else
	return 0;
#endif
}
840 
/* Grow an allocated chunk from old_size to new_size in place, by mapping
 * the pages immediately after it.  Returns 1 on success, 0 when extension
 * is unsupported or the adjacent address range is unavailable. */
static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		if (heap->storage->handlers.chunk_extend) {
			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifndef _WIN32
	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
	return 0;
#endif
}
858 
/* Initialize a freshly obtained chunk and link it into the heap's circular
 * chunk list (inserted before main_chunk, i.e. at the tail). */
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->heap = heap;
	chunk->next = heap->main_chunk;
	chunk->prev = heap->main_chunk->prev;
	chunk->prev->next = chunk;
	chunk->next->prev = chunk;
	/* the first ZEND_MM_FIRST_PAGE page(s) hold this header */
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* the younger chunks have bigger number */
	chunk->num = chunk->prev->num + 1;
	/* mark first pages as allocated */
	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}
875 
876 /***********************/
877 /* Huge Runs (forward) */
878 /***********************/
879 
880 static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
881 static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
882 static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
883 
884 #if ZEND_DEBUG
885 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
886 #else
887 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
888 #endif
889 
890 /**************/
891 /* Large Runs */
892 /**************/
893 
/* Allocate a run of "pages_count" consecutive free pages from one of the
 * heap's chunks (a "large run").  Chunks are scanned in list order starting
 * at heap->main_chunk; within a chunk a best-fit search over the free_map
 * bitset is used.  If no chunk has room, a cached or freshly mapped chunk is
 * taken.  Returns the page-aligned address of the run, or NULL on hard OOM
 * (after zend_mm_safe_error when overflow handling permits returning). */
#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_chunk *chunk = heap->main_chunk;
	int page_num, len;
	int steps = 0;	/* number of chunks inspected before a fit was found */

	while (1) {
		/* quick reject: chunk cannot possibly hold the run */
		if (UNEXPECTED(chunk->free_pages < pages_count)) {
			goto not_found;
#if 0
		/* disabled alternative: First-Fit search (kept for reference) */
		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
				goto not_found;
			} else {
				page_num = chunk->free_tail;
				goto found;
			}
		} else if (0) {
			/* First-Fit Search */
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					len = i - page_num;
					if (len >= pages_count) {
						goto found;
					} else if (i >= free_tail) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
				if (len >= pages_count) {
					goto found;
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
#endif
		} else {
			/* Best-Fit Search: walk the free_map bitset word by word,
			 * measuring each gap of clear (free) bits; an exact-size gap is
			 * taken immediately, otherwise the smallest gap that still fits
			 * is remembered in "best"/"best_len". */
			int best = -1;				/* start page of smallest fitting gap; -1 = none.
										 * Page 0 holds the chunk header (runs start at
										 * ZEND_MM_FIRST_PAGE), so "best > 0" means "found". */
			int best_len = ZEND_MM_PAGES;
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						/* reached the end of the chunk: fall back to the
						 * best gap recorded so far, if any */
						if (best > 0) {
							page_num = best;
							goto found;
						} else {
							goto not_found;
						}
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					if (i >= free_tail || i == ZEND_MM_PAGES) {
						/* the gap runs to the end of the used area */
						len = ZEND_MM_PAGES - page_num;
						if (len >= pages_count && len < best_len) {
							chunk->free_tail = page_num + pages_count;
							goto found;
						} else {
							/* set accurate value */
							chunk->free_tail = page_num;
							if (best > 0) {
								page_num = best;
								goto found;
							} else {
								goto not_found;
							}
						}
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = i + zend_mm_bitset_ntz(tmp) - page_num;
				if (len >= pages_count) {
					if (len == pages_count) {
						/* exact fit: stop searching */
						goto found;
					} else if (len < best_len) {
						best_len = len;
						best = page_num;
					}
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
		}

not_found:
		if (chunk->next == heap->main_chunk) {
			/* all existing chunks scanned: obtain a new chunk, preferring
			 * the cache of recently deleted chunks over a fresh mapping */
get_chunk:
			if (heap->cached_chunks) {
				heap->cached_chunks_count--;
				chunk = heap->cached_chunks;
				heap->cached_chunks = chunk->next;
			} else {
#if ZEND_MM_LIMIT
				if (UNEXPECTED(heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit)) {
					/* over memory_limit: try GC once, then report */
					if (zend_mm_gc(heap)) {
						goto get_chunk;
					} else if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#endif
				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
				if (UNEXPECTED(chunk == NULL)) {
					/* insufficient memory */
					if (zend_mm_gc(heap) &&
					    (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
						/* pass */
					} else {
#if !ZEND_MM_LIMIT
						zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#if ZEND_MM_STAT
				do {
					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
					size_t peak = MAX(heap->real_peak, size);
					heap->real_size = size;
					heap->real_peak = peak;
				} while (0);
#elif ZEND_MM_LIMIT
				heap->real_size += ZEND_MM_CHUNK_SIZE;

#endif
			}
			heap->chunks_count++;
			if (heap->chunks_count > heap->peak_chunks_count) {
				heap->peak_chunks_count = heap->chunks_count;
			}
			zend_mm_chunk_init(heap, chunk);
			/* a fresh chunk is entirely free after the header page */
			page_num = ZEND_MM_FIRST_PAGE;
			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
			goto found;
		} else {
			chunk = chunk->next;
			steps++;
		}
	}

found:
	if (steps > 2 && pages_count < 8) {
		/* move chunk into the head of the linked-list */
		chunk->prev->next = chunk->next;
		chunk->next->prev = chunk->prev;
		chunk->next = heap->main_chunk->next;
		chunk->prev = heap->main_chunk;
		chunk->prev->next = chunk;
		chunk->next->prev = chunk;
	}
	/* mark run as allocated */
	chunk->free_pages -= pages_count;
	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
	if (page_num == chunk->free_tail) {
		chunk->free_tail = page_num + pages_count;
	}
	return ZEND_MM_PAGE_ADDR(chunk, page_num);
}
1102 
1103 static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1104 {
1105 	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1106 #if ZEND_DEBUG
1107 	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1108 #else
1109 	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1110 #endif
1111 #if ZEND_MM_STAT
1112 	do {
1113 		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1114 		size_t peak = MAX(heap->peak, size);
1115 		heap->size = size;
1116 		heap->peak = peak;
1117 	} while (0);
1118 #endif
1119 	return ptr;
1120 }
1121 
/* Unlink "chunk" from the heap's circular chunk list and either cache it for
 * reuse or return it to the system.  Caching is preferred while the number of
 * live+cached chunks is below the running average (plus hysteresis), or when
 * the heap keeps bouncing around the same chunk count (last_chunks_delete_*
 * track repeated deletions at the same boundary). */
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->next->prev = chunk->prev;
	chunk->prev->next = chunk->next;
	heap->chunks_count--;
	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
	 || (heap->chunks_count == heap->last_chunks_delete_boundary
	  && heap->last_chunks_delete_count >= 4)) {
		/* delay deletion */
		heap->cached_chunks_count++;
		chunk->next = heap->cached_chunks;
		heap->cached_chunks = chunk;
	} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
		if (!heap->cached_chunks) {
			/* track repeated deletions at the same chunk count so future
			 * deletions at this boundary switch to caching (see above) */
			if (heap->chunks_count != heap->last_chunks_delete_boundary) {
				heap->last_chunks_delete_boundary = heap->chunks_count;
				heap->last_chunks_delete_count = 0;
			} else {
				heap->last_chunks_delete_count++;
			}
		}
		/* prefer to keep the lower-numbered (older) chunk cached and free
		 * the higher-numbered one */
		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
		} else {
//TODO: select the best chunk to delete???
			chunk->next = heap->cached_chunks->next;
			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks = chunk;
		}
	}
}
1156 
1157 static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count, int free_chunk)
1158 {
1159 	chunk->free_pages += pages_count;
1160 	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1161 	chunk->map[page_num] = 0;
1162 	if (chunk->free_tail == page_num + pages_count) {
1163 		/* this setting may be not accurate */
1164 		chunk->free_tail = page_num;
1165 	}
1166 	if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1167 		zend_mm_delete_chunk(heap, chunk);
1168 	}
1169 }
1170 
/* Free a large run, allowing the owning chunk to be deleted if it becomes
 * completely empty (free_chunk = 1). */
static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}
1175 
/* Free a large run and subtract its size from the usage statistics. */
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
	zend_mm_free_pages(heap, chunk, page_num, pages_count);
}
1183 
1184 /**************/
1185 /* Small Runs */
1186 /**************/
1187 
/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc)
 * i.e. the 1-based position of the most significant set bit of "size".
 * Behavior for size == 0 is undefined on every branch. */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
	/* __builtin_clz counts leading zeros of a 32-bit int; XOR with 0x1f
	 * converts that to the index of the highest set bit */
	return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
	unsigned long index;

	if (!BitScanReverse(&index, (unsigned long)size)) {
		/* undefined behavior */
		return 64;
	}

	return (((31 - (int)index) ^ 0x1f) + 1);
#else
	/* portable fallback: binary search over the value; only valid for the
	 * small-size range (fits in 16 bits) */
	int n = 16;
	if (size <= 0x00ff) {n -= 8; size = size << 8;}
	if (size <= 0x0fff) {n -= 4; size = size << 4;}
	if (size <= 0x3fff) {n -= 2; size = size << 2;}
	if (size <= 0x7fff) {n -= 1;}
	return n;
#endif
}
1211 
/* NOTE: classic function-like macros — arguments are evaluated twice, so do
 * not pass expressions with side effects. */
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
1219 
/* Map a small allocation size to its bin index.  Bins 0..7 cover sizes up to
 * 64 bytes in 8-byte steps; above that, bins are grouped four per power of
 * two (see bin_data_size[]). */
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
	/* disabled table-driven variant, kept for reference */
	int n;
                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
	static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
	static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};

	if (UNEXPECTED(size <= 2)) return 0;
	n = zend_mm_small_size_to_bit(size - 1);
	return ((size-1) >> f1[n]) + f2[n];
#else
	unsigned int t1, t2;

	if (size <= 64) {
		/* we need to support size == 0 ... */
		return (size - !!size) >> 3;
	} else {
		/* t2 = log2(size-1) - 2: shift that leaves the top 3 bits of the
		 * size, giving 4 sub-bins per power-of-two size class */
		t1 = size - 1;
		t2 = zend_mm_small_size_to_bit(t1) - 3;
		t1 = t1 >> t2;
		t2 = t2 - 3;
		t2 = t2 << 2;
		return (int)(t1 + t2);
	}
#endif
}
1247 
1248 #define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1249 
1250 static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1251 {
1252     zend_mm_chunk *chunk;
1253     int page_num;
1254 	zend_mm_bin *bin;
1255 	zend_mm_free_slot *p, *end;
1256 
1257 #if ZEND_DEBUG
1258 	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1259 #else
1260 	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1261 #endif
1262 	if (UNEXPECTED(bin == NULL)) {
1263 		/* insufficient memory */
1264 		return NULL;
1265 	}
1266 
1267 	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1268 	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1269 	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1270 	if (bin_pages[bin_num] > 1) {
1271 		int i = 1;
1272 		do {
1273 			chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1274 			i++;
1275 		} while (i < bin_pages[bin_num]);
1276 	}
1277 
1278 	/* create a linked list of elements from 1 to last */
1279 	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1280 	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1281 	do {
1282 		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);;
1283 #if ZEND_DEBUG
1284 		do {
1285 			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1286 			dbg->size = 0;
1287 		} while (0);
1288 #endif
1289 		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1290 	} while (p != end);
1291 
1292 	/* terminate list using NULL */
1293 	p->next_free_slot = NULL;
1294 #if ZEND_DEBUG
1295 		do {
1296 			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1297 			dbg->size = 0;
1298 		} while (0);
1299 #endif
1300 
1301 	/* return first element */
1302 	return (char*)bin;
1303 }
1304 
1305 static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1306 {
1307 #if ZEND_MM_STAT
1308 	do {
1309 		size_t size = heap->size + bin_data_size[bin_num];
1310 		size_t peak = MAX(heap->peak, size);
1311 		heap->size = size;
1312 		heap->peak = peak;
1313 	} while (0);
1314 #endif
1315 
1316 	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1317 		zend_mm_free_slot *p = heap->free_slot[bin_num];
1318 		heap->free_slot[bin_num] = p->next_free_slot;
1319 		return (void*)p;
1320 	} else {
1321 		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1322 	}
1323 }
1324 
1325 static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1326 {
1327 	zend_mm_free_slot *p;
1328 
1329 #if ZEND_MM_STAT
1330 	heap->size -= bin_data_size[bin_num];
1331 #endif
1332 
1333 #if ZEND_DEBUG
1334 	do {
1335 		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1336 		dbg->size = 0;
1337 	} while (0);
1338 #endif
1339 
1340     p = (zend_mm_free_slot*)ptr;
1341     p->next_free_slot = heap->free_slot[bin_num];
1342     heap->free_slot[bin_num] = p;
1343 }
1344 
1345 /********/
1346 /* Heap */
1347 /********/
1348 
#if ZEND_DEBUG
/* Locate the zend_mm_debug_info trailer stored at the end of a small or
 * large allocation.  Huge blocks never reach here: a chunk-aligned pointer
 * (page_offset == 0) fails the first consistency check. */
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;
	size_t block_size;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		block_size = bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		block_size = ZEND_MM_PAGE_SIZE * (size_t)ZEND_MM_LRUN_PAGES(info);
	}
	/* the debug record occupies the (aligned) tail of the block */
	return (zend_mm_debug_info*)((char*)ptr + block_size - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
}
#endif
1372 
/* Central allocation entry point: dispatch to the small, large or huge
 * allocator depending on the (debug-adjusted) request size.  In debug builds
 * the request is padded with a zend_mm_debug_info trailer that records the
 * caller's file/line. */
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
	if (UNEXPECTED(size < real_size)) {
		/* padding wrapped around size_t */
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		return NULL;
	}
#endif
	if (size <= ZEND_MM_MAX_SMALL_SIZE) {
		ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		/* record the caller in the block's debug trailer */
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		/* huge blocks keep their own debug record in the huge list, so the
		 * original (unpadded) size is passed through */
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
1417 
1418 static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1419 {
1420 	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1421 
1422 	if (UNEXPECTED(page_offset == 0)) {
1423 		if (ptr != NULL) {
1424 			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1425 		}
1426 	} else {
1427 		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1428 		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1429 		zend_mm_page_info info = chunk->map[page_num];
1430 
1431 		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1432 		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1433 			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1434 		} else /* if (info & ZEND_MM_IS_LRUN) */ {
1435 			int pages_count = ZEND_MM_LRUN_PAGES(info);
1436 
1437 			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1438 			zend_mm_free_large(heap, chunk, page_num, pages_count);
1439 		}
1440 	}
1441 }
1442 
/* Return the usable size of an allocation: the huge-list size for huge
 * blocks, the bin size for small runs, or the page-rounded size for large
 * runs.  Note: this is the allocator's rounded size, not the size originally
 * requested by the caller. */
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		/* disabled: report the exact requested size from the debug trailer */
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}
1470 
/* Reallocate "ptr" to "size" bytes, copying at most "copy_size" bytes on a
 * move.  Tries hard to resize in place: huge blocks are truncated/extended
 * at the OS level, large runs grow/shrink by claiming or releasing adjacent
 * pages, and small allocations stay put when old and new size map to the
 * same bin.  Falls back to alloc+memcpy+free when in-place resize fails. */
static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	size_t real_size;
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		/* chunk-aligned pointer: huge block (or NULL, which degenerates to
		 * a plain allocation) */
		if (UNEXPECTED(ptr == NULL)) {
			return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
		old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		real_size = size;
		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
		if (size > ZEND_MM_MAX_LARGE_SIZE) {
			/* stays huge: try to keep the same mapping */
#if ZEND_DEBUG
			size = real_size;
#endif
#ifdef ZEND_WIN32
			/* On Windows we don't have ability to extend huge blocks in-place.
			 * We allocate them with 2MB size granularity, to avoid many
			 * reallocations when they are extended by small pieces
			 */
			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
			if (new_size == old_size) {
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			} else if (new_size < old_size) {
				/* unmap tail */
				if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
					heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
					heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
					return ptr;
				}
			} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
				/* growing: enforce memory_limit before extending */
				if (UNEXPECTED(heap->real_size + (new_size - old_size) > heap->limit)) {
					if (zend_mm_gc(heap) && heap->real_size + (new_size - old_size) <= heap->limit) {
						/* pass */
					} else if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
						return NULL;
					}
				}
#endif
				/* try to map tail right after this block */
				if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
					heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
					heap->real_peak = MAX(heap->real_peak, heap->real_size);
					heap->size += new_size - old_size;
					heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
					return ptr;
				}
			}
		}
	} else {
		/* pointer lives inside a chunk: small or large run */
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
			old_size = bin_data_size[old_bin_num];
			if (size <= ZEND_MM_MAX_SMALL_SIZE) {
				int bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
				if (old_bin_num == bin_num) {
					/* same bin: nothing to move, just refresh debug info */
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				}
			}
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);
							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		size = real_size;
#endif
	}

	/* Naive reallocation */
#if ZEND_MM_STAT
	/* the transient alloc+free below must not inflate the recorded peaks */
	do {
		size_t orig_peak = heap->peak;
		size_t orig_real_peak = heap->real_peak;
#endif
	ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(ret, ptr, MIN(old_size, copy_size));
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
		heap->peak = MAX(orig_peak, heap->size);
		heap->real_peak = MAX(orig_real_peak, heap->real_size);
	} while (0);
#endif
	return ret;
}
1677 
1678 /*********************/
1679 /* Huge Runs (again) */
1680 /*********************/
1681 
1682 #if ZEND_DEBUG
1683 static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1684 #else
1685 static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1686 #endif
1687 {
1688 	zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1689 	list->ptr = ptr;
1690 	list->size = size;
1691 	list->next = heap->huge_list;
1692 #if ZEND_DEBUG
1693 	list->dbg.size = dbg_size;
1694 	list->dbg.filename = __zend_filename;
1695 	list->dbg.orig_filename = __zend_orig_filename;
1696 	list->dbg.lineno = __zend_lineno;
1697 	list->dbg.orig_lineno = __zend_orig_lineno;
1698 #endif
1699 	heap->huge_list = list;
1700 }
1701 
1702 static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1703 {
1704 	zend_mm_huge_list *prev = NULL;
1705 	zend_mm_huge_list *list = heap->huge_list;
1706 	while (list != NULL) {
1707 		if (list->ptr == ptr) {
1708 			size_t size;
1709 
1710 			if (prev) {
1711 				prev->next = list->next;
1712 			} else {
1713 				heap->huge_list = list->next;
1714 			}
1715 			size = list->size;
1716 			zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1717 			return size;
1718 		}
1719 		prev = list;
1720 		list = list->next;
1721 	}
1722 	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1723 	return 0;
1724 }
1725 
1726 static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1727 {
1728 	zend_mm_huge_list *list = heap->huge_list;
1729 	while (list != NULL) {
1730 		if (list->ptr == ptr) {
1731 			return list->size;
1732 		}
1733 		list = list->next;
1734 	}
1735 	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1736 	return 0;
1737 }
1738 
1739 #if ZEND_DEBUG
1740 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1741 #else
1742 static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1743 #endif
1744 {
1745 	zend_mm_huge_list *list = heap->huge_list;
1746 	while (list != NULL) {
1747 		if (list->ptr == ptr) {
1748 			list->size = size;
1749 #if ZEND_DEBUG
1750 			list->dbg.size = dbg_size;
1751 			list->dbg.filename = __zend_filename;
1752 			list->dbg.orig_filename = __zend_orig_filename;
1753 			list->dbg.lineno = __zend_lineno;
1754 			list->dbg.orig_lineno = __zend_orig_lineno;
1755 #endif
1756 			return;
1757 		}
1758 		list = list->next;
1759 	}
1760 }
1761 
/* Allocate a huge block: a direct chunk-aligned mapping for requests larger
 * than ZEND_MM_MAX_LARGE_SIZE.  Enforces memory_limit (with one GC retry),
 * registers the block in the huge list, and updates usage statistics.
 * Returns NULL on limit/OOM failure when overflow handling permits. */
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#ifdef ZEND_WIN32
	/* On Windows we don't have ability to extend huge blocks in-place.
	 * We allocate them with 2MB size granularity, to avoid many
	 * reallocations when they are extended by small pieces
	 */
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
	void *ptr;

#if ZEND_MM_LIMIT
	if (UNEXPECTED(heap->real_size + new_size > heap->limit)) {
		/* over memory_limit: try GC once before reporting */
		if (zend_mm_gc(heap) && heap->real_size + new_size <= heap->limit) {
			/* pass */
		} else if (heap->overflow == 0) {
#if ZEND_DEBUG
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
			return NULL;
		}
	}
#endif
	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(ptr == NULL)) {
		/* insufficient memory */
		if (zend_mm_gc(heap) &&
		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
			/* pass */
		} else {
#if !ZEND_MM_LIMIT
			zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
			return NULL;
		}
	}
#if ZEND_DEBUG
	/* in debug builds, record both the mapped and the requested size */
	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->real_size + new_size;
		size_t peak = MAX(heap->real_peak, size);
		heap->real_size = size;
		heap->real_peak = peak;
	} while (0);
	do {
		size_t size = heap->size + new_size;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#elif ZEND_MM_LIMIT
	heap->real_size += new_size;
#endif
	return ptr;
}
1829 
/* Release a huge block: unlink it from heap->huge_list, return its memory
 * to the system, and update the limit/statistics accounting. */
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t size;

	/* huge blocks are always aligned on the chunk (2M) boundary */
	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size -= size;
#endif
#if ZEND_MM_STAT
	heap->size -= size;
#endif
}
1844 
1845 /******************/
1846 /* Initialization */
1847 /******************/
1848 
/* Create a new heap.  The heap descriptor itself lives inside the first
 * chunk (chunk->heap_slot), so a single chunk allocation bootstraps
 * everything.  Returns NULL if the initial chunk cannot be obtained. */
static zend_mm_heap *zend_mm_init(void)
{
	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	zend_mm_heap *heap;

	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	/* the first chunk hosts the heap structure and is its own circular list */
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	/* mark the reserved first page(s) as used in the bitmap and page map */
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
	heap->last_chunks_delete_boundary = 0;
	heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	/* default limit: effectively unlimited (SIZE_MAX / 2) */
	heap->limit = (Z_L(-1) >> Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
#if ZEND_MM_STORAGE
	heap->storage = NULL;
#endif
	heap->huge_list = NULL;
	return heap;
}
1902 
1903 ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
1904 {
1905 	zend_mm_free_slot *p, **q;
1906 	zend_mm_chunk *chunk;
1907 	size_t page_offset;
1908 	int page_num;
1909 	zend_mm_page_info info;
1910 	int i, has_free_pages, free_counter;
1911 	size_t collected = 0;
1912 
1913 #if ZEND_MM_CUSTOM
1914 	if (heap->use_custom_heap) {
1915 		return 0;
1916 	}
1917 #endif
1918 
1919 	for (i = 0; i < ZEND_MM_BINS; i++) {
1920 		has_free_pages = 0;
1921 		p = heap->free_slot[i];
1922 		while (p != NULL) {
1923 			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1924 			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1925 			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1926 			ZEND_ASSERT(page_offset != 0);
1927 			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1928 			info = chunk->map[page_num];
1929 			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1930 			if (info & ZEND_MM_IS_LRUN) {
1931 				page_num -= ZEND_MM_NRUN_OFFSET(info);
1932 				info = chunk->map[page_num];
1933 				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1934 				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1935 			}
1936 			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1937 			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
1938 			if (free_counter == bin_elements[i]) {
1939 				has_free_pages = 1;
1940 			}
1941 			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);;
1942 			p = p->next_free_slot;
1943 		}
1944 
1945 		if (!has_free_pages) {
1946 			continue;
1947 		}
1948 
1949 		q = &heap->free_slot[i];
1950 		p = *q;
1951 		while (p != NULL) {
1952 			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1953 			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1954 			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1955 			ZEND_ASSERT(page_offset != 0);
1956 			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1957 			info = chunk->map[page_num];
1958 			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1959 			if (info & ZEND_MM_IS_LRUN) {
1960 				page_num -= ZEND_MM_NRUN_OFFSET(info);
1961 				info = chunk->map[page_num];
1962 				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1963 				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1964 			}
1965 			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1966 			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
1967 				/* remove from cache */
1968 				p = p->next_free_slot;;
1969 				*q = p;
1970 			} else {
1971 				q = &p->next_free_slot;
1972 				p = *q;
1973 			}
1974 		}
1975 	}
1976 
1977 	chunk = heap->main_chunk;
1978 	do {
1979 		i = ZEND_MM_FIRST_PAGE;
1980 		while (i < chunk->free_tail) {
1981 			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
1982 				info = chunk->map[i];
1983 				if (info & ZEND_MM_IS_SRUN) {
1984 					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1985 					int pages_count = bin_pages[bin_num];
1986 
1987 					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
1988 						/* all elemens are free */
1989 						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
1990 						collected += pages_count;
1991 					} else {
1992 						/* reset counter */
1993 						chunk->map[i] = ZEND_MM_SRUN(bin_num);
1994 					}
1995 					i += bin_pages[bin_num];
1996 				} else /* if (info & ZEND_MM_IS_LRUN) */ {
1997 					i += ZEND_MM_LRUN_PAGES(info);
1998 				}
1999 			} else {
2000 				i++;
2001 			}
2002 		}
2003 		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
2004 			zend_mm_chunk *next_chunk = chunk->next;
2005 
2006 			zend_mm_delete_chunk(heap, chunk);
2007 			chunk = next_chunk;
2008 		} else {
2009 			chunk = chunk->next;
2010 		}
2011 	} while (chunk != heap->main_chunk);
2012 
2013 	return collected * ZEND_MM_PAGE_SIZE;
2014 }
2015 
2016 #if ZEND_DEBUG
2017 /******************/
2018 /* Leak detection */
2019 /******************/
2020 
2021 static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
2022 {
2023     int empty = 1;
2024 	zend_long count = 0;
2025 	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2026 	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2027 
2028 	while (j < bin_elements[bin_num]) {
2029 		if (dbg->size != 0) {
2030 			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2031 				count++;
2032 				dbg->size = 0;
2033 				dbg->filename = NULL;
2034 				dbg->lineno = 0;
2035 			} else {
2036 				empty = 0;
2037 			}
2038 		}
2039 		j++;
2040 		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2041 	}
2042 	if (empty) {
2043 		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
2044 	}
2045 	return count;
2046 }
2047 
/* Scan the remaining used pages of all chunks (starting at page "i" of
 * chunk "p") for further leaks matching the file/line of "leak", clearing
 * their bitmap entries as it goes.  Returns the repeat count. */
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
{
	zend_long count = 0;

	do {
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					count += zend_mm_find_leaks_small(p, i, 0, leak);
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					/* debug info is stored at the very end of a large block */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
						count++;
					}
					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
	} while (p != heap->main_chunk);
	return count;
}
2077 
/* Walk the huge list after "list" and free every entry allocated at the same
 * file/line (i.e. repeats of the same leak).  Returns the repeat count. */
static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
{
	zend_long count = 0;
	zend_mm_huge_list *prev = list;
	zend_mm_huge_list *p = list->next;

	while (p) {
		if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
			/* unlink, release the block, then the list node itself */
			prev->next = p->next;
			zend_mm_chunk_free(heap, p->ptr, p->size);
			zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
			count++;
		} else {
			prev = p;
		}
		p = prev->next;
	}

	return count;
}
2098 
/* Report every block still allocated at shutdown as a memory leak (one
 * message per unique allocation site, with a repeat count), freeing the
 * blocks as they are reported.  Covers huge blocks, small-bin elements and
 * large runs. */
static void zend_mm_check_leaks(zend_mm_heap *heap)
{
	zend_mm_huge_list *list;
	zend_mm_chunk *p;
	zend_leak_info leak;
	zend_long repeated = 0;
	uint32_t total = 0;
	int i, j;

	/* find leaked huge blocks and free them */
	list = heap->huge_list;
	while (list) {
		zend_mm_huge_list *q = list;

		leak.addr = list->ptr;
		leak.size = list->dbg.size;
		leak.filename = list->dbg.filename;
		leak.orig_filename = list->dbg.orig_filename;
		leak.lineno = list->dbg.lineno;
		leak.orig_lineno = list->dbg.orig_lineno;

		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
		/* fold repeats from the same allocation site into one message */
		repeated = zend_mm_find_leaks_huge(heap, list);
		total += 1 + repeated;
		if (repeated) {
			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
		}

		heap->huge_list = list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
	}

	/* for each chunk */
	p = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					/* debug info sits at the tail of each bin element */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					j = 0;
					while (j < bin_elements[bin_num]) {
						if (dbg->size != 0) {
							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
							leak.size = dbg->size;
							leak.filename = dbg->filename;
							leak.orig_filename = dbg->orig_filename;
							leak.lineno = dbg->lineno;
							leak.orig_lineno = dbg->orig_lineno;

							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

							dbg->size = 0;
							dbg->filename = NULL;
							dbg->lineno = 0;

							/* count repeats in the rest of this run and in all later pages */
							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
							total += 1 + repeated;
							if (repeated) {
								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
							}
						}
						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
						j++;
					}
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					/* debug info sits at the very end of a large block */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
					leak.size = dbg->size;
					leak.filename = dbg->filename;
					leak.orig_filename = dbg->orig_filename;
					leak.lineno = dbg->lineno;
					leak.orig_lineno = dbg->orig_lineno;

					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

					zend_mm_bitset_reset_range(p->free_map, i, pages_count);

					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
					total += 1 + repeated;
					if (repeated) {
						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
					}
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
	} while (p != heap->main_chunk);
	if (total) {
		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
	}
}
2204 #endif
2205 
2206 void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
2207 {
2208 	zend_mm_chunk *p;
2209 	zend_mm_huge_list *list;
2210 
2211 #if ZEND_MM_CUSTOM
2212 	if (heap->use_custom_heap) {
2213 		if (full) {
2214 			if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2215 				heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2216 			} else {
2217 				heap->custom_heap.std._free(heap);
2218 			}
2219 		}
2220 		return;
2221 	}
2222 #endif
2223 
2224 #if ZEND_DEBUG
2225 	if (!silent) {
2226 		zend_mm_check_leaks(heap);
2227 	}
2228 #endif
2229 
2230 	/* free huge blocks */
2231 	list = heap->huge_list;
2232 	heap->huge_list = NULL;
2233 	while (list) {
2234 		zend_mm_huge_list *q = list;
2235 		list = list->next;
2236 		zend_mm_chunk_free(heap, q->ptr, q->size);
2237 	}
2238 
2239 	/* move all chunks except of the first one into the cache */
2240 	p = heap->main_chunk->next;
2241 	while (p != heap->main_chunk) {
2242 		zend_mm_chunk *q = p->next;
2243 		p->next = heap->cached_chunks;
2244 		heap->cached_chunks = p;
2245 		p = q;
2246 		heap->chunks_count--;
2247 		heap->cached_chunks_count++;
2248 	}
2249 
2250 	if (full) {
2251 		/* free all cached chunks */
2252 		while (heap->cached_chunks) {
2253 			p = heap->cached_chunks;
2254 			heap->cached_chunks = p->next;
2255 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2256 		}
2257 		/* free the first chunk */
2258 		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2259 	} else {
2260 		zend_mm_heap old_heap;
2261 
2262 		/* free some cached chunks to keep average count */
2263 		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2264 		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2265 		       heap->cached_chunks) {
2266 			p = heap->cached_chunks;
2267 			heap->cached_chunks = p->next;
2268 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2269 			heap->cached_chunks_count--;
2270 		}
2271 		/* clear cached chunks */
2272 		p = heap->cached_chunks;
2273 		while (p != NULL) {
2274 			zend_mm_chunk *q = p->next;
2275 			memset(p, 0, sizeof(zend_mm_chunk));
2276 			p->next = q;
2277 			p = q;
2278 		}
2279 
2280 		/* reinitialize the first chunk and heap */
2281 		old_heap = *heap;
2282 		p = heap->main_chunk;
2283 		memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
2284 		*heap = old_heap;
2285 		memset(heap->free_slot, 0, sizeof(heap->free_slot));
2286 		heap->main_chunk = p;
2287 		p->heap = &p->heap_slot;
2288 		p->next = p;
2289 		p->prev = p;
2290 		p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2291 		p->free_tail = ZEND_MM_FIRST_PAGE;
2292 		p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
2293 		p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2294 		heap->chunks_count = 1;
2295 		heap->peak_chunks_count = 1;
2296 		heap->last_chunks_delete_boundary = 0;
2297 		heap->last_chunks_delete_count = 0;
2298 #if ZEND_MM_STAT || ZEND_MM_LIMIT
2299 		heap->real_size = ZEND_MM_CHUNK_SIZE;
2300 #endif
2301 #if ZEND_MM_STAT
2302 		heap->real_peak = ZEND_MM_CHUNK_SIZE;
2303 		heap->size = heap->peak = 0;
2304 #endif
2305 	}
2306 }
2307 
2308 /**************/
2309 /* PUBLIC API */
2310 /**************/
2311 
/* Public entry point: allocate from an explicit heap (size-class dispatch
 * happens in zend_mm_alloc_heap). */
ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2316 
/* Public entry point: free a pointer belonging to an explicit heap. */
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2321 
/* Public entry point: realloc on an explicit heap; up to "size" bytes are
 * preserved (copy_size == size). */
void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2326 
/* Like _zend_mm_realloc() but with an explicit upper bound on how many
 * bytes must be copied into the new block. */
void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2331 
/* Public entry point: the usable size of a block on an explicit heap. */
ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2336 
2337 /**********************/
2338 /* Allocation Manager */
2339 /**********************/
2340 
/* Allocator globals: just the currently active heap, kept per-thread under
 * ZTS and as a single static otherwise. */
typedef struct _zend_alloc_globals {
	zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
static int alloc_globals_id;
/* AG(v): access field "v" of this thread's allocator globals */
# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif
2352 
/* Return non-zero when the built-in Zend allocator is active (i.e. no
 * custom heap handlers have been installed). */
ZEND_API int is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
	return !AG(mm_heap)->use_custom_heap;
#else
	return 1;
#endif
}
2361 
#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
#undef _emalloc

#if ZEND_MM_CUSTOM
/* Forward the call to the installed custom heap handlers (and return from
 * the caller) when a custom heap is active; no-op otherwise. */
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
				return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			} else { \
				return AG(mm_heap)->custom_heap.std._malloc(size); \
			} \
		} \
	} while (0)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
				AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			} else { \
				AG(mm_heap)->custom_heap.std._free(ptr); \
			} \
			return; \
		} \
	} while (0)
#else
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif

/* Generate the fixed-size _emalloc_<size>() fast-path allocators. */
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
		return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
	}

/* Instantiate _emalloc_<size>() for every small bin size. */
ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2397 
/* Fast-path allocator for "large" (multi-page, sub-chunk) blocks. */
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2404 
/* Fast-path allocator for "huge" (larger-than-chunk) blocks. */
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{

	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_huge(AG(mm_heap), size);
}
2411 
#if ZEND_DEBUG
/* Debug flavor of the per-bin free functions: verifies the pointer really
 * belongs to a small run of the expected bin before freeing. */
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}
#else
/* Release flavor: only checks that the chunk belongs to this heap. */
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}
#endif

/* Instantiate _efree_<size>() for every small bin size. */
ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2439 
/* Fast-path free for "large" blocks whose size the caller still knows;
 * debug assertions cross-check it against the page map. */
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{

	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	{
		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
	}
}
2456 
/* Fast-path free for "huge" blocks; the size argument is unused because the
 * huge list records the real size. */
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{

	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	zend_mm_free_huge(AG(mm_heap), ptr);
}
2463 #endif
2464 
/* Main request-local allocator entry point (emalloc).  Dispatches to the
 * custom heap handlers when installed, otherwise to the Zend MM. */
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
			return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return AG(mm_heap)->custom_heap.std._malloc(size);
		}
	}
#endif
	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2479 
2480 ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2481 {
2482 
2483 #if ZEND_MM_CUSTOM
2484 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2485 		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2486 			AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2487 		} else {
2488 			AG(mm_heap)->custom_heap.std._free(ptr);
2489 	    }
2490 		return;
2491 	}
2492 #endif
2493 	zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2494 }
2495 
2496 ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2497 {
2498 
2499 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2500 		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2501 			return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2502 		} else {
2503 			return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2504 		}
2505 	}
2506 	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2507 }
2508 
2509 ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2510 {
2511 
2512 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2513 		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2514 			return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2515 		} else {
2516 			return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2517 		}
2518 	}
2519 	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2520 }
2521 
2522 ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2523 {
2524 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2525 		return 0;
2526 	}
2527 	return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2528 }
2529 
/* Overflow-checked emalloc of nmemb*size+offset bytes (bails out via
 * zend_safe_address_guarded on overflow). */
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return emalloc_rel(zend_safe_address_guarded(nmemb, size, offset));
}
2534 
/* Overflow-checked persistent (system) allocation of nmemb*size+offset bytes. */
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
	return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
}
2539 
/* Overflow-checked erealloc to nmemb*size+offset bytes. */
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return erealloc_rel(ptr, zend_safe_address_guarded(nmemb, size, offset));
}
2544 
/* Overflow-checked persistent (system) realloc to nmemb*size+offset bytes. */
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
	return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
}
2549 
2550 
/* calloc-style emalloc: overflow-checked nmemb*size allocation, zero-filled.
 * Returns NULL only if the underlying allocation returns NULL. */
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *p;

	p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	if (UNEXPECTED(p == NULL)) {
		return p;
	}
	/* size * nmemb cannot overflow here - _safe_emalloc already checked it */
	memset(p, 0, size * nmemb);
	return p;
}
2562 
2563 ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2564 {
2565 	size_t length;
2566 	char *p;
2567 
2568 	length = strlen(s);
2569 	if (UNEXPECTED(length + 1 == 0)) {
2570 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
2571 	}
2572 	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2573 	if (UNEXPECTED(p == NULL)) {
2574 		return p;
2575 	}
2576 	memcpy(p, s, length+1);
2577 	return p;
2578 }
2579 
2580 ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2581 {
2582 	char *p;
2583 
2584 	if (UNEXPECTED(length + 1 == 0)) {
2585 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
2586 	}
2587 	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2588 	if (UNEXPECTED(p == NULL)) {
2589 		return p;
2590 	}
2591 	memcpy(p, s, length);
2592 	p[length] = 0;
2593 	return p;
2594 }
2595 
2596 
2597 ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2598 {
2599 	char *p;
2600 
2601 	if (UNEXPECTED(length + 1 == 0)) {
2602 		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
2603 	}
2604 	p = (char *) malloc(length + 1);
2605 	if (UNEXPECTED(p == NULL)) {
2606 		return p;
2607 	}
2608 	if (EXPECTED(length)) {
2609 		memcpy(p, s, length);
2610 	}
2611 	p[length] = 0;
2612 	return p;
2613 }
2614 
2615 
/* Set the memory_limit for the request heap; values below one chunk are
 * clamped up to ZEND_MM_CHUNK_SIZE.  Always returns SUCCESS. */
ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
	AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
	return SUCCESS;
}
2623 
2624 ZEND_API size_t zend_memory_usage(int real_usage)
2625 {
2626 #if ZEND_MM_STAT
2627 	if (real_usage) {
2628 		return AG(mm_heap)->real_size;
2629 	} else {
2630 		size_t usage = AG(mm_heap)->size;
2631 		return usage;
2632 	}
2633 #endif
2634 	return 0;
2635 }
2636 
2637 ZEND_API size_t zend_memory_peak_usage(int real_usage)
2638 {
2639 #if ZEND_MM_STAT
2640 	if (real_usage) {
2641 		return AG(mm_heap)->real_peak;
2642 	} else {
2643 		return AG(mm_heap)->peak;
2644 	}
2645 #endif
2646 	return 0;
2647 }
2648 
/* Convenience wrapper: shut down the request heap (see zend_mm_shutdown). */
ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
{
	zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}
2653 
2654 static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
2655 {
2656 #if ZEND_MM_CUSTOM
2657 	char *tmp = getenv("USE_ZEND_ALLOC");
2658 
2659 	if (tmp && !zend_atoi(tmp, 0)) {
2660 		alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2661 		memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2662 		alloc_globals->mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2663 		alloc_globals->mm_heap->custom_heap.std._malloc = __zend_malloc;
2664 		alloc_globals->mm_heap->custom_heap.std._free = free;
2665 		alloc_globals->mm_heap->custom_heap.std._realloc = __zend_realloc;
2666 		return;
2667 	}
2668 #endif
2669 #ifdef MAP_HUGETLB
2670 	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
2671 	if (tmp && zend_atoi(tmp, 0)) {
2672 		zend_mm_use_huge_pages = 1;
2673 	}
2674 #endif
2675 	ZEND_TSRMLS_CACHE_UPDATE();
2676 	alloc_globals->mm_heap = zend_mm_init();
2677 }
2678 
#ifdef ZTS
/* Per-thread teardown (ZTS builds): fully and silently destroy the heap. */
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
	zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif
2685 
/* Process-wide allocator startup: create the allocator globals (per-thread
 * under ZTS) and detect the OS page size used for huge-block alignment. */
ZEND_API void start_memory_manager(void)
{
#ifdef ZTS
	ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
#else
	alloc_globals_ctor(&alloc_globals);
#endif
#ifndef _WIN32
#  if defined(_SC_PAGESIZE)
	REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
#  elif defined(_SC_PAGE_SIZE)
	REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
#  endif
#endif
}
2701 
2702 ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
2703 {
2704 	zend_mm_heap *old_heap;
2705 
2706 	old_heap = AG(mm_heap);
2707 	AG(mm_heap) = (zend_mm_heap*)new_heap;
2708 	return (zend_mm_heap*)old_heap;
2709 }
2710 
/* Return the currently active request heap. */
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
{
	return AG(mm_heap);
}
2715 
/* Return the custom-heap mode of the ACTIVE heap.
 * NOTE(review): the new_heap parameter is ignored - the function inspects
 * AG(mm_heap) instead; kept as-is since callers may rely on this. */
ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
{
#if ZEND_MM_CUSTOM
	return AG(mm_heap)->use_custom_heap;
#else
	return 0;
#endif
}
2724 
2725 ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2726                                           void* (*_malloc)(size_t),
2727                                           void  (*_free)(void*),
2728                                           void* (*_realloc)(void*, size_t))
2729 {
2730 #if ZEND_MM_CUSTOM
2731 	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2732 
2733 	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2734 	_heap->custom_heap.std._malloc = _malloc;
2735 	_heap->custom_heap.std._free = _free;
2736 	_heap->custom_heap.std._realloc = _realloc;
2737 #endif
2738 }
2739 
2740 ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
2741                                           void* (**_malloc)(size_t),
2742                                           void  (**_free)(void*),
2743                                           void* (**_realloc)(void*, size_t))
2744 {
2745 #if ZEND_MM_CUSTOM
2746 	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2747 
2748 	if (heap->use_custom_heap) {
2749 		*_malloc = _heap->custom_heap.std._malloc;
2750 		*_free = _heap->custom_heap.std._free;
2751 		*_realloc = _heap->custom_heap.std._realloc;
2752 	} else {
2753 		*_malloc = NULL;
2754 		*_free = NULL;
2755 		*_realloc = NULL;
2756 	}
2757 #else
2758 	*_malloc = NULL;
2759 	*_free = NULL;
2760 	*_realloc = NULL;
2761 #endif
2762 }
2763 
#if ZEND_DEBUG
/* Debug-build variant of zend_mm_set_custom_handlers(): installs
 * handlers that additionally receive the file/line bookkeeping
 * arguments carried by the ZEND_FILE_LINE_* macros. */
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
	heap->custom_heap.debug._malloc = _malloc;
	heap->custom_heap.debug._free = _free;
	heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif
2780 
2781 ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
2782 {
2783 #if ZEND_MM_STORAGE
2784 	return heap->storage;
2785 #else
2786 	return NULL
2787 #endif
2788 }
2789 
/* Create and return a fresh heap with the default (mmap-backed)
 * configuration; thin public wrapper around zend_mm_init(). */
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
	return zend_mm_init();
}
2794 
/* Create a heap whose chunk memory is obtained through caller-supplied
 * handlers (handlers->chunk_alloc / chunk_free) instead of the default
 * mmap path. `data`/`data_size` is an opaque blob copied next to the
 * storage record for the handlers' own use. Returns NULL on allocation
 * failure, or when storage support is compiled out. */
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
	zend_mm_storage tmp_storage, *storage;
	zend_mm_chunk *chunk;
	zend_mm_heap *heap;

	/* Chicken-and-egg: the permanent storage record must live inside the
	 * heap, but allocating the heap's first chunk already needs working
	 * storage handlers. Bootstrap with a stack-local storage record, then
	 * copy it into heap memory further below. */
	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
	tmp_storage.data = data;
	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	/* The heap descriptor lives in a reserved slot of the first chunk. */
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	/* Circular doubly-linked chunk list containing just this chunk. */
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	/* Mark the pages reserved for the chunk header as used in the bitmap,
	 * and record them as one large run in the page map. */
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
	heap->last_chunks_delete_boundary = 0;
	heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	/* Effectively "no limit": the largest positive value of the type. */
	heap->limit = (Z_L(-1) >> Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = 0;
#endif
	/* Point at the stack-local record just long enough to allocate the
	 * permanent one from the heap itself. */
	heap->storage = &tmp_storage;
	heap->huge_list = NULL;
	memset(heap->free_slot, 0, sizeof(heap->free_slot));
	storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
	if (!storage) {
		/* Roll back: hand the bootstrap chunk back to the caller's
		 * handler before bailing out. */
		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	/* Switch to the permanent, heap-resident storage record; the caller's
	 * data blob is copied into the space allocated right behind it. */
	memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
	if (data) {
		storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
		memcpy(storage->data, data, data_size);
	}
	heap->storage = storage;
	return heap;
#else
	return NULL;
#endif
}
2873 
2874 static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
2875 {
2876 	fprintf(stderr, "Out of memory\n");
2877 	exit(1);
2878 }
2879 
2880 ZEND_API void * __zend_malloc(size_t len)
2881 {
2882 	void *tmp = malloc(len);
2883 	if (EXPECTED(tmp || !len)) {
2884 		return tmp;
2885 	}
2886 	zend_out_of_memory();
2887 }
2888 
2889 ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
2890 {
2891 	void *tmp = _safe_malloc(nmemb, len, 0);
2892 	memset(tmp, 0, nmemb * len);
2893 	return tmp;
2894 }
2895 
2896 ZEND_API void * __zend_realloc(void *p, size_t len)
2897 {
2898 	p = realloc(p, len);
2899 	if (EXPECTED(p || !len)) {
2900 		return p;
2901 	}
2902 	zend_out_of_memory();
2903 }
2904 
2905 /*
2906  * Local variables:
2907  * tab-width: 4
2908  * c-basic-offset: 4
2909  * indent-tabs-mode: t
2910  * End:
2911  */
2912