Lines Matching refs:chunk

322 #define ZEND_MM_PAGE_ADDR(chunk, page_num) \  argument
323 ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
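The macro above (322-323) works because zend_mm_page is declared as a struct that is exactly one page wide, so ordinary pointer arithmetic scales the page number by the page size. A minimal standalone sketch of the same trick; PAGE_SIZE, page_t and PAGE_ADDR are stand-in names, not the allocator's:

    #include <stdio.h>

    #define PAGE_SIZE 4096                              /* stand-in for ZEND_MM_PAGE_SIZE */

    typedef struct { char bytes[PAGE_SIZE]; } page_t;   /* plays the role of zend_mm_page */

    /* Same shape as ZEND_MM_PAGE_ADDR: adding page_num to a page_t* advances
     * by whole pages, so no explicit multiplication by the page size is needed. */
    #define PAGE_ADDR(chunk, page_num) ((void*)(((page_t*)(chunk)) + (page_num)))

    int main(void) {
        static char chunk[8 * PAGE_SIZE];
        /* page 3 starts 3 * PAGE_SIZE = 12288 bytes into the chunk */
        printf("%td\n", (char*)PAGE_ADDR(chunk, 3) - chunk);
        return 0;
    }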
841 static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk) argument
843 chunk->heap = heap;
844 chunk->next = heap->main_chunk;
845 chunk->prev = heap->main_chunk->prev;
846 chunk->prev->next = chunk;
847 chunk->next->prev = chunk;
849 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
850 chunk->free_tail = ZEND_MM_FIRST_PAGE;
852 chunk->num = chunk->prev->num + 1;
854 chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
855 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
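Lines 843-855 do two things: splice the new chunk into the heap's circular doubly-linked list just before main_chunk, and mark the first ZEND_MM_FIRST_PAGE pages (the chunk header itself) as permanently in use, via the low-bit mask (1L << ZEND_MM_FIRST_PAGE) - 1 in free_map[0] and an LRUN entry in map[0]. A reduced sketch of the list splice, assuming only the next/prev fields visible in the listing:

    /* Reduced chunk record: just the ring pointers used at lines 844-847. */
    typedef struct chunk {
        struct chunk *next;
        struct chunk *prev;
    } chunk_t;

    /* Splice 'c' into the ring immediately before 'main_chunk', using the
     * same four-assignment pattern as zend_mm_chunk_init. */
    static void chunk_link(chunk_t *main_chunk, chunk_t *c) {
        c->next = main_chunk;
        c->prev = main_chunk->prev;
        c->prev->next = c;
        c->next->prev = c;
    }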
882 zend_mm_chunk *chunk = heap->main_chunk; local
887 if (UNEXPECTED(chunk->free_pages < pages_count)) {
890 } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
891 if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
894 page_num = chunk->free_tail;
899 int free_tail = chunk->free_tail;
900 zend_mm_bitset *bitset = chunk->free_map;
941 uint32_t free_tail = chunk->free_tail;
942 zend_mm_bitset *bitset = chunk->free_map;
970 chunk->free_tail = page_num + pages_count;
974 chunk->free_tail = page_num;
1001 if (chunk->next == heap->main_chunk) {
1005 chunk = heap->cached_chunks;
1006 heap->cached_chunks = chunk->next;
1022 chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1023 if (UNEXPECTED(chunk == NULL)) {
1026 (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1055 zend_mm_chunk_init(heap, chunk);
1060 chunk = chunk->next;
1068 chunk->prev->next = chunk->next;
1069 chunk->next->prev = chunk->prev;
1070 chunk->next = heap->main_chunk->next;
1071 chunk->prev = heap->main_chunk;
1072 chunk->prev->next = chunk;
1073 chunk->next->prev = chunk;
1076 chunk->free_pages -= pages_count;
1077 zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1078 chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1079 if (page_num == chunk->free_tail) {
1080 chunk->free_tail = page_num + pages_count;
1082 return ZEND_MM_PAGE_ADDR(chunk, page_num);
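The allocation path above tracks page occupancy in a per-chunk bitmap (free_map) plus a per-page info array (map): the run's pages are marked used with zend_mm_bitset_set_range, the first page gets an LRUN entry recording the run length, and free_tail is bumped when the run was carved off the untouched tail of the chunk. The bitset helpers themselves are not part of this listing; below is a simplified, bit-per-page version of what they have to do (the real ones operate a machine word at a time):

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t bitset_t;   /* stand-in for zend_mm_bitset; bit set = page in use */

    static void bitset_set_range(bitset_t *set, int start, int count) {
        for (int i = start; i < start + count; i++)
            set[i / 64] |= (uint64_t)1 << (i % 64);
    }

    static void bitset_reset_range(bitset_t *set, int start, int count) {
        for (int i = start; i < start + count; i++)
            set[i / 64] &= ~((uint64_t)1 << (i % 64));
    }

    static bool bitset_is_free_range(const bitset_t *set, int start, int count) {
        for (int i = start; i < start + count; i++)
            if (set[i / 64] & ((uint64_t)1 << (i % 64)))
                return false;
        return true;
    }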
1116 static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk) argument
1118 chunk->next->prev = chunk->prev;
1119 chunk->prev->next = chunk->next;
1126 chunk->next = heap->cached_chunks;
1127 heap->cached_chunks = chunk;
1140 if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1141 zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1144 chunk->next = heap->cached_chunks->next;
1146 heap->cached_chunks = chunk;
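zend_mm_delete_chunk (1116-1146) unlinks the emptied chunk from the ring and then either returns it to the OS via zend_mm_chunk_free or parks it on heap->cached_chunks, a singly-linked LIFO that zend_mm_alloc_pages pops from at lines 1005-1006 before mapping a fresh chunk. A toy version of that cache, with hypothetical names:

    #include <stddef.h>

    typedef struct chunk { struct chunk *next; } chunk_t;

    typedef struct {
        chunk_t *cached_chunks;        /* LIFO of emptied, still-mapped chunks */
    } heap_t;

    /* Park an emptied chunk instead of unmapping it (cf. lines 1126-1127). */
    static void cache_push(heap_t *heap, chunk_t *chunk) {
        chunk->next = heap->cached_chunks;
        heap->cached_chunks = chunk;
    }

    /* Reuse a cached chunk if one is available (cf. lines 1005-1006). */
    static chunk_t *cache_pop(heap_t *heap) {
        chunk_t *chunk = heap->cached_chunks;
        if (chunk != NULL)
            heap->cached_chunks = chunk->next;
        return chunk;
    }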
1151 static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk) argument
1153 chunk->free_pages += pages_count;
1154 zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1155 chunk->map[page_num] = 0;
1156 if (chunk->free_tail == page_num + pages_count) {
1158 chunk->free_tail = page_num;
1160 if (free_chunk && chunk != heap->main_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1161 zend_mm_delete_chunk(heap, chunk);
1165 static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count) argument
1167 zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1170 static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count) argument
1175 zend_mm_free_pages(heap, chunk, page_num, pages_count);
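The free path (1151-1175) is the mirror image of the allocation path: add the pages back to free_pages, clear their bits in free_map, zero the map entry of the run's first page, pull free_tail back if the run was the last one in the chunk, and, when the chunk ends up completely empty, hand it to zend_mm_delete_chunk. A compact sketch of that bookkeeping over a byte-per-page stand-in for the bitmap:

    #include <stdint.h>

    #define PAGES 512                     /* stand-in for ZEND_MM_PAGES */

    typedef struct {
        uint32_t free_pages;
        uint32_t free_tail;
        uint32_t map[PAGES];              /* per-page info, 0 = not a run start   */
        uint8_t  used[PAGES];             /* byte-per-page stand-in for free_map  */
    } chunk_t;

    /* Mirrors the bookkeeping of zend_mm_free_pages_ex (lines 1153-1158). */
    static void chunk_free_pages(chunk_t *chunk, uint32_t page_num, uint32_t count) {
        chunk->free_pages += count;
        for (uint32_t i = page_num; i < page_num + count; i++)
            chunk->used[i] = 0;
        chunk->map[page_num] = 0;
        if (chunk->free_tail == page_num + count)
            chunk->free_tail = page_num;
    }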
1246 zend_mm_chunk *chunk; local
1261 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1263 chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1268 chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
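Lines 1261-1268 set up a small-size run: the run's first page gets an SRUN entry carrying the bin number, and each following page of a multi-page bin gets an NRUN entry that also records its offset from the run start, so a pointer landing on any page can be walked back to the bin's first page. The real bit layout of these entries is defined in zend_alloc.c; the toy encoding below only illustrates the idea, and all constants and helpers here are hypothetical:

    #include <stdint.h>

    #define IS_SRUN 0x40000000u           /* toy flag bits, not the real layout */
    #define IS_NRUN 0x20000000u

    static uint32_t srun(uint32_t bin_num)                { return IS_SRUN | bin_num; }
    static uint32_t nrun(uint32_t bin_num, uint32_t offs) { return IS_SRUN | IS_NRUN | (offs << 16) | bin_num; }

    /* Walk back from any page of a small run to the page the run starts on. */
    static uint32_t run_start(uint32_t page_num, uint32_t info) {
        return (info & IS_NRUN) ? page_num - ((info >> 16) & 0x1fffu) : page_num;
    }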
1348 zend_mm_chunk *chunk; local
1353 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1355 info = chunk->map[page_num];
1356 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1422 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); local
1424 zend_mm_page_info info = chunk->map[page_num];
1426 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1433 zend_mm_free_large(heap, chunk, page_num, pages_count);
1445 zend_mm_chunk *chunk; local
1453 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1455 info = chunk->map[page_num];
1456 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1589 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); local
1591 zend_mm_page_info info = chunk->map[page_num];
1598 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
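Every lookup above starts from ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE): chunks are mapped at chunk-size alignment, so masking off the low bits of any interior pointer yields the owning chunk header, and the offset within the chunk divided by the page size yields the index into chunk->map. A sketch of that arithmetic with stand-in constants:

    #include <stdint.h>

    #define CHUNK_SIZE (2 * 1024 * 1024)   /* stand-in for ZEND_MM_CHUNK_SIZE */
    #define PAGE_SIZE  4096                /* stand-in for ZEND_MM_PAGE_SIZE  */

    /* Mask the low bits to recover the chunk base (ZEND_MM_ALIGNED_BASE). */
    static void *aligned_base(const void *ptr) {
        return (void*)((uintptr_t)ptr & ~((uintptr_t)CHUNK_SIZE - 1));
    }

    /* The remaining offset, divided by the page size, is the page number. */
    static int page_num_of(const void *ptr) {
        return (int)(((uintptr_t)ptr & (CHUNK_SIZE - 1)) / PAGE_SIZE);
    }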
1672 chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1673 chunk->free_pages += rest_pages_count;
1674 zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1690 zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1699 chunk->free_pages -= new_pages_count - old_pages_count;
1700 zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1701 chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
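Lines 1672-1701 resize a large run in place: shrinking rewrites the LRUN entry with the new length and releases the tail pages, while growing first checks zend_mm_bitset_is_free_range on the pages immediately after the run and claims them only if they are all free. A toy version of the grow case, again over a byte-per-page bitmap:

    #include <stdint.h>
    #include <stdbool.h>

    /* Claim the tail pages only if the bitmap says they are currently free;
     * used[] is a one-byte-per-page stand-in for chunk->free_map. */
    static bool grow_in_place(uint8_t *used, uint32_t *free_pages,
                              uint32_t page_num, uint32_t old_count, uint32_t new_count) {
        for (uint32_t i = page_num + old_count; i < page_num + new_count; i++)
            if (used[i]) return false;                 /* neighbouring pages busy */
        for (uint32_t i = page_num + old_count; i < page_num + new_count; i++)
            used[i] = 1;                               /* claim the extra pages   */
        *free_pages -= new_count - old_count;
        return true;
    }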
1902 zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE); local
1905 if (UNEXPECTED(chunk == NULL)) {
1911 heap = &chunk->heap_slot;
1912 chunk->heap = heap;
1913 chunk->next = chunk;
1914 chunk->prev = chunk;
1915 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1916 chunk->free_tail = ZEND_MM_FIRST_PAGE;
1917 chunk->num = 0;
1918 chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1919 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1920 heap->main_chunk = chunk;
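The startup code at 1902-1920 shows where the heap itself lives: inside the first chunk's heap_slot. That chunk becomes main_chunk, and its ring pointers refer back to itself until more chunks are added. A reduced bootstrap sketch, with the structures trimmed to the fields visible in the listing:

    typedef struct heap  heap_t;
    typedef struct chunk chunk_t;

    struct heap  { chunk_t *main_chunk; };
    struct chunk { heap_t *heap; chunk_t *next, *prev; heap_t heap_slot; };

    static heap_t *heap_bootstrap(chunk_t *first_chunk) {
        heap_t *heap = &first_chunk->heap_slot;   /* heap lives inside the chunk */
        first_chunk->heap = heap;
        first_chunk->next = first_chunk;          /* self-linked ring of one     */
        first_chunk->prev = first_chunk;
        heap->main_chunk  = first_chunk;
        return heap;
    }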
1953 zend_mm_chunk *chunk; local
1971 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1972 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1976 info = chunk->map[page_num];
1980 info = chunk->map[page_num];
1989 chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
2000 chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
2001 ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
2005 info = chunk->map[page_num];
2009 info = chunk->map[page_num];
2025 chunk = heap->main_chunk;
2028 while (i < chunk->free_tail) {
2029 if (zend_mm_bitset_is_set(chunk->free_map, i)) {
2030 info = chunk->map[i];
2037 zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
2041 chunk->map[i] = ZEND_MM_SRUN(bin_num);
2051 if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
2052 zend_mm_chunk *next_chunk = chunk->next;
2054 zend_mm_delete_chunk(heap, chunk);
2055 chunk = next_chunk;
2057 chunk = chunk->next;
2059 } while (chunk != heap->main_chunk);
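The gc sweep at 2025-2059 walks the chunk ring and deletes any chunk that has become completely empty. Because zend_mm_delete_chunk unlinks the node the walk is standing on, the code saves chunk->next first (line 2052) and continues from there. A minimal sketch of that pattern; the empty-chunk test and the explicit skip of main_chunk are assumptions of this sketch:

    typedef struct chunk { struct chunk *next, *prev; int free_pages; } chunk_t;

    static void unlink_chunk(chunk_t *c) {        /* as in zend_mm_delete_chunk */
        c->next->prev = c->prev;
        c->prev->next = c->next;
    }

    /* Walk the ring and drop empty chunks; grab 'next' first so the walk
     * survives the unlink (mirrors lines 2051-2057). */
    static void sweep_empty(chunk_t *main_chunk, int empty_free_pages) {
        chunk_t *chunk = main_chunk;
        do {
            if (chunk != main_chunk && chunk->free_pages == empty_free_pages) {
                chunk_t *next_chunk = chunk->next;
                unlink_chunk(chunk);
                chunk = next_chunk;
            } else {
                chunk = chunk->next;
            }
        } while (chunk != main_chunk);
    }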
2447 zend_mm_chunk *chunk = AG(mm_heap)->main_chunk; local
2450 if (ptr >= (void*)chunk
2451 && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
2454 chunk = chunk->next;
2455 } while (chunk != AG(mm_heap)->main_chunk);
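Lines 2447-2455 answer "does this pointer belong to the heap at all?" by walking the same ring and checking whether the pointer falls inside any chunk's ZEND_MM_CHUNK_SIZE window. A standalone sketch of that check:

    #include <stdbool.h>

    #define CHUNK_SIZE (2 * 1024 * 1024)   /* stand-in for ZEND_MM_CHUNK_SIZE */

    typedef struct chunk { struct chunk *next; } chunk_t;

    /* A pointer is owned by the heap if it lies inside any chunk on the
     * circular list starting at main_chunk (cf. lines 2450-2454). */
    static bool heap_owns(chunk_t *main_chunk, const void *ptr) {
        chunk_t *chunk = main_chunk;
        do {
            if ((const char*)ptr >= (const char*)chunk &&
                (const char*)ptr <  (const char*)chunk + CHUNK_SIZE)
                return true;
            chunk = chunk->next;
        } while (chunk != main_chunk);
        return false;
    }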
2546 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2548 ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2549 ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2550 ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2559 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2560 ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2573 zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); local
2577 ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2578 ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2579 ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2580 zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
3050 zend_mm_chunk *chunk; local
3055 chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
3056 if (UNEXPECTED(chunk == NULL)) {
3062 heap = &chunk->heap_slot;
3063 chunk->heap = heap;
3064 chunk->next = chunk;
3065 chunk->prev = chunk;
3066 chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
3067 chunk->free_tail = ZEND_MM_FIRST_PAGE;
3068 chunk->num = 0;
3069 chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
3070 chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
3071 heap->main_chunk = chunk;
3099 handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);