/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium-sized (or
   sometimes large) memory blocks, and that the allocator is not called too
   frequently (it is less optimized than other allocators). Thus, using it
   as a generic allocator is not recommended.

   How does it work:
     Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   All blocks and the block terminator start with a block_header. The block
   header contains the size of the previous block and of the block itself.
   These sizes can also hold special values.
     Block size:
       0 - The block is a free_block; its real size is kept in the free_block's own size member.
       1 - The block is a block terminator.
       n - The block is currently in use, and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk forward or backward along the block chain.
   The unused blocks are kept in a linked list pointed to by free_blocks. This
   list is used to find a suitable memory area when the allocator is called.

   When a block is freed, the new free block is merged with its adjacent free
   blocks if possible. For example, if the layout is

     [ free block ][ used block ][ free block ]

   and "used block" is freed, the three blocks are merged together:

     [ one big free block ]
*/

/* Expected functions:
     alloc_chunk / free_chunk :
       * allocate executable system memory chunks
       * the size is always divisible by CHUNK_SIZE
     SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
       * provided as part of sljitUtils
       * only the allocator requires this lock; sljit itself is fully
         thread-safe, as it only uses local variables

   Supported defines:
     SLJIT_HAS_CHUNK_HEADER - (optional) sljit_chunk_header is defined
     SLJIT_HAS_EXECUTABLE_OFFSET - (optional) has executable offset data
     SLJIT_UPDATE_WX_FLAGS - (optional) update WX flags
*/
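
/*
   Illustrative only (not part of the allocator): a minimal sketch of how the
   two entry points defined in this file pair up, assuming a configuration
   without SLJIT_HAS_EXECUTABLE_OFFSET, where the returned pointer is both
   writable and executable. The 128 byte size is an arbitrary placeholder.

     void *code = sljit_malloc_exec(128);
     if (code) {
         ... copy generated machine code into the buffer ...
         sljit_free_exec(code);
     }
*/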

#ifdef SLJIT_HAS_CHUNK_HEADER
#define CHUNK_HEADER_SIZE (sizeof(struct sljit_chunk_header))
#else /* !SLJIT_HAS_CHUNK_HEADER */
#define CHUNK_HEADER_SIZE 0
#endif /* SLJIT_HAS_CHUNK_HEADER */

#ifndef SLJIT_UPDATE_WX_FLAGS
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
#endif /* SLJIT_UPDATE_WX_FLAGS */

#ifndef CHUNK_SIZE
/* 64 KByte if not specified. */
#define CHUNK_SIZE (sljit_uw)0x10000
#endif /* CHUNK_SIZE */

struct block_header {
	sljit_uw size;
	sljit_uw prev_size;
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
	sljit_sw executable_offset;
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
};

struct free_block {
	struct block_header header;
	struct free_block *next;
	struct free_block *prev;
	sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
	((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
	((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base) ((void*)((base) + 1))
#define CHUNK_MASK (~(CHUNK_SIZE - 1))
#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)
#define CHUNK_EXTRA_SIZE (sizeof(struct block_header) + CHUNK_HEADER_SIZE)
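
/* ALIGN_SIZE() reserves room for the block header and rounds the total up to
   a multiple of 8. For example, on a 64-bit build without
   SLJIT_HAS_EXECUTABLE_OFFSET (16 byte block_header), ALIGN_SIZE(100) is
   (100 + 16 + 7) & ~7 == 120. */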

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

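/* Marks the block as free (header.size == 0 identifies a free_block) and
   pushes it onto the head of the doubly linked free_blocks list. */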
static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
	free_block->header.size = 0;
	free_block->size = size;

	free_block->next = free_blocks;
	free_block->prev = NULL;
	if (free_blocks)
		free_blocks->prev = free_block;
	free_blocks = free_block;
}

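/* Unlinks the block from the free_blocks list; when the block is the list
   head, free_blocks is advanced to the next entry. */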
static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
	if (free_block->next)
		free_block->next->prev = free_block->prev;

	if (free_block->prev)
		free_block->prev->next = free_block->next;
	else {
		SLJIT_ASSERT(free_blocks == free_block);
		free_blocks = free_block->next;
	}
}

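/* First-fit allocation: the free list is scanned for a block that is large
   enough; the allocation is cut from the end of that free block, or the
   whole block is used when splitting would leave too small a remainder.
   If no free block fits, a new chunk is requested from alloc_chunk(). */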
SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
	struct block_header *header;
	struct block_header *next_header;
	struct free_block *free_block;
	sljit_uw chunk_size;

#ifdef SLJIT_HAS_CHUNK_HEADER
	struct sljit_chunk_header *chunk_header;
#else /* !SLJIT_HAS_CHUNK_HEADER */
	void *chunk_header;
#endif /* SLJIT_HAS_CHUNK_HEADER */

#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
	sljit_sw executable_offset;
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */

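	/* Enforce a minimum payload so that every block, together with its
	   header, is at least 64 bytes; this keeps split remainders usable
	   and matches the "size + 64" split threshold below. */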
	if (size < (64 - sizeof(struct block_header)))
		size = (64 - sizeof(struct block_header));
	size = ALIGN_SIZE(size);

	SLJIT_ALLOCATOR_LOCK();
	free_block = free_blocks;
	while (free_block) {
		if (free_block->size >= size) {
			chunk_size = free_block->size;
			SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
			if (chunk_size > size + 64) {
				/* We just cut a block from the end of the free block. */
				chunk_size -= size;
				free_block->size = chunk_size;
				header = AS_BLOCK_HEADER(free_block, chunk_size);
				header->prev_size = chunk_size;
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
				header->executable_offset = free_block->header.executable_offset;
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
				AS_BLOCK_HEADER(header, size)->prev_size = size;
			} else {
				sljit_remove_free_block(free_block);
				header = (struct block_header*)free_block;
				size = chunk_size;
			}
			allocated_size += size;
			header->size = size;
			SLJIT_ALLOCATOR_UNLOCK();
			return MEM_START(header);
		}
		free_block = free_block->next;
	}

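	/* No suitable free block: round the request plus the chunk overhead up
	   to a multiple of CHUNK_SIZE. For example, with the default 64 KByte
	   CHUNK_SIZE and no chunk header, a 100 byte request becomes a single
	   0x10000 byte chunk. */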
	chunk_size = (size + CHUNK_EXTRA_SIZE + CHUNK_SIZE - 1) & CHUNK_MASK;

	chunk_header = alloc_chunk(chunk_size);
	if (!chunk_header) {
		SLJIT_ALLOCATOR_UNLOCK();
		return NULL;
	}

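	/* When SLJIT_HAS_EXECUTABLE_OFFSET is defined, the executable offset is
	   the distance from the writable mapping of the chunk to its executable
	   alias; it is copied into every block header of the chunk. */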
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
	executable_offset = (sljit_sw)((sljit_u8*)chunk_header->executable - (sljit_u8*)chunk_header);
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */

	chunk_size -= CHUNK_EXTRA_SIZE;
	total_size += chunk_size;

	header = (struct block_header*)(((sljit_u8*)chunk_header) + CHUNK_HEADER_SIZE);

	header->prev_size = 0;
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
	header->executable_offset = executable_offset;
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */

	if (chunk_size > size + 64) {
		/* Cut the allocated space into a free and a used block. */
		allocated_size += size;
		header->size = size;
		chunk_size -= size;

		free_block = AS_FREE_BLOCK(header, size);
		free_block->header.prev_size = size;
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
		free_block->header.executable_offset = executable_offset;
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
		sljit_insert_free_block(free_block, chunk_size);
		next_header = AS_BLOCK_HEADER(free_block, chunk_size);
	} else {
		/* All space belongs to this allocation. */
		allocated_size += chunk_size;
		header->size = chunk_size;
		next_header = AS_BLOCK_HEADER(header, chunk_size);
	}
	next_header->size = 1;
	next_header->prev_size = chunk_size;
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
	next_header->executable_offset = executable_offset;
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
	SLJIT_ALLOCATOR_UNLOCK();
	return MEM_START(header);
}

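/* Returns a block to the allocator: the block is merged with its previous
   and/or next neighbour when they are free, and a chunk that becomes
   completely free may be returned to the system if enough free space
   remains for future allocations. */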
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void *ptr)
{
	struct block_header *header;
	struct free_block *free_block;

	SLJIT_ALLOCATOR_LOCK();
	header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
	header = AS_BLOCK_HEADER(header, -header->executable_offset);
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
	allocated_size -= header->size;

	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);

	/* Connecting free blocks together if possible. */

	/* If header->prev_size == 0, free_block will be equal to header.
	   In this case, free_block->header.size will be > 0. */
	free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
	if (SLJIT_UNLIKELY(!free_block->header.size)) {
		free_block->size += header->size;
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	} else {
		free_block = (struct free_block*)header;
		sljit_insert_free_block(free_block, header->size);
	}

	header = AS_BLOCK_HEADER(free_block, free_block->size);
	if (SLJIT_UNLIKELY(!header->size)) {
		free_block->size += ((struct free_block*)header)->size;
		sljit_remove_free_block((struct free_block*)header);
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}

	/* The whole chunk is free. */
	if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
		/* Release the chunk only if more than (allocated_size / 2) bytes of
		   free space would still remain afterwards. */
		if (total_size - free_block->size > (allocated_size * 3 / 2)) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + CHUNK_EXTRA_SIZE);
		}
	}

	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
	SLJIT_ALLOCATOR_UNLOCK();
}

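/* Releases every chunk that is completely free (a single free block that
   starts the chunk and is followed directly by the block terminator) back
   to the system. */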
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
	struct free_block* free_block;
	struct free_block* next_free_block;

	SLJIT_ALLOCATOR_LOCK();
	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);

	free_block = free_blocks;
	while (free_block) {
		next_free_block = free_block->next;
		if (!free_block->header.prev_size &&
				AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + CHUNK_EXTRA_SIZE);
		}
		free_block = next_free_block;
	}

	SLJIT_ASSERT(total_size || (!total_size && !free_blocks));
	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
	SLJIT_ALLOCATOR_UNLOCK();
}

#ifdef SLJIT_HAS_EXECUTABLE_OFFSET
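/* Returns the executable offset stored in the block header that immediately
   precedes the given code pointer. */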
SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void *code)
{
	return ((struct block_header*)SLJIT_CODE_TO_PTR(code))[-1].executable_offset;
}
#endif /* SLJIT_HAS_EXECUTABLE_OFFSET */
