Lines Matching refs:chunk
326 #define ZEND_MM_PAGE_ADDR(chunk, page_num) \ argument
327 ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
859 static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk) argument
861 chunk->heap = heap;
862 chunk->next = heap->main_chunk;
863 chunk->prev = heap->main_chunk->prev;
864 chunk->prev->next = chunk;
865 chunk->next->prev = chunk;
867 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
868 chunk->free_tail = ZEND_MM_FIRST_PAGE;
870 chunk->num = chunk->prev->num + 1;
872 chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
873 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
900 zend_mm_chunk *chunk = heap->main_chunk; local
905 if (UNEXPECTED(chunk->free_pages < pages_count)) {
908 } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
909 if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
912 page_num = chunk->free_tail;
917 int free_tail = chunk->free_tail;
918 zend_mm_bitset *bitset = chunk->free_map;
959 int free_tail = chunk->free_tail;
960 zend_mm_bitset *bitset = chunk->free_map;
988 chunk->free_tail = page_num + pages_count;
992 chunk->free_tail = page_num;
1019 if (chunk->next == heap->main_chunk) {
1023 chunk = heap->cached_chunks;
1024 heap->cached_chunks = chunk->next;
1040 chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1041 if (UNEXPECTED(chunk == NULL)) {
1044 …(chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NUL…
1073 zend_mm_chunk_init(heap, chunk);
1078 chunk = chunk->next;
1086 chunk->prev->next = chunk->next;
1087 chunk->next->prev = chunk->prev;
1088 chunk->next = heap->main_chunk->next;
1089 chunk->prev = heap->main_chunk;
1090 chunk->prev->next = chunk;
1091 chunk->next->prev = chunk;
1094 chunk->free_pages -= pages_count;
1095 zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1096 chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1097 if (page_num == chunk->free_tail) {
1098 chunk->free_tail = page_num + pages_count;
1100 return ZEND_MM_PAGE_ADDR(chunk, page_num);
1122 static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk) argument
1124 chunk->next->prev = chunk->prev;
1125 chunk->prev->next = chunk->next;
1132 chunk->next = heap->cached_chunks;
1133 heap->cached_chunks = chunk;
1146 if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1147 zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1150 chunk->next = heap->cached_chunks->next;
1152 heap->cached_chunks = chunk;
1157 static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, int … argument
1159 chunk->free_pages += pages_count;
1160 zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1161 chunk->map[page_num] = 0;
1162 if (chunk->free_tail == page_num + pages_count) {
1164 chunk->free_tail = page_num;
1166 if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1167 zend_mm_delete_chunk(heap, chunk);
1171 static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_co… argument
1173 zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1176 static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int pag… argument
1181 zend_mm_free_pages(heap, chunk, page_num, pages_count);
1252 zend_mm_chunk *chunk; local
1267 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1269 chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1273 chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1353 zend_mm_chunk *chunk; local
1358 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1360 info = chunk->map[page_num];
1361 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1427 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); local
1429 zend_mm_page_info info = chunk->map[page_num];
1431 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1438 zend_mm_free_large(heap, chunk, page_num, pages_count);
1450 zend_mm_chunk *chunk; local
1458 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1460 info = chunk->map[page_num];
1461 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1563 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); local
1565 zend_mm_page_info info = chunk->map[page_num];
1572 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1613 chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1614 chunk->free_pages += rest_pages_count;
1615 zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1631 …zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pa…
1640 chunk->free_pages -= new_pages_count - old_pages_count;
1641 …zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_…
1642 chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1851 …zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_S… local
1854 if (UNEXPECTED(chunk == NULL)) {
1864 heap = &chunk->heap_slot;
1865 chunk->heap = heap;
1866 chunk->next = chunk;
1867 chunk->prev = chunk;
1868 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1869 chunk->free_tail = ZEND_MM_FIRST_PAGE;
1870 chunk->num = 0;
1871 chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1872 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1873 heap->main_chunk = chunk;
1906 zend_mm_chunk *chunk; local
1923 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1924 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1928 info = chunk->map[page_num];
1932 info = chunk->map[page_num];
1941 chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
1952 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1953 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1957 info = chunk->map[page_num];
1961 info = chunk->map[page_num];
1977 chunk = heap->main_chunk;
1980 while (i < chunk->free_tail) {
1981 if (zend_mm_bitset_is_set(chunk->free_map, i)) {
1982 info = chunk->map[i];
1989 zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
1993 chunk->map[i] = ZEND_MM_SRUN(bin_num);
2003 if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
2004 zend_mm_chunk *next_chunk = chunk->next;
2006 zend_mm_delete_chunk(heap, chunk);
2007 chunk = next_chunk;
2009 chunk = chunk->next;
2011 } while (chunk != heap->main_chunk);
2418 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2420 ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2421 ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2422 ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2431 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2432 ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2446 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); local
2450 …ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE)…
2451 ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2452 ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2453 zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2799 zend_mm_chunk *chunk; local
2804 …chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE…
2805 if (UNEXPECTED(chunk == NULL)) {
2815 heap = &chunk->heap_slot;
2816 chunk->heap = heap;
2817 chunk->next = chunk;
2818 chunk->prev = chunk;
2819 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2820 chunk->free_tail = ZEND_MM_FIRST_PAGE;
2821 chunk->num = 0;
2822 chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2823 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2824 heap->main_chunk = chunk;
2852 handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);