/*
 * Stack-less Just-In-Time compiler
 *
 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium (or sometimes
   large) memory blocks, and that the allocator is not called too frequently
   (it is less optimized than other allocators). Thus, using it as a generic
   allocator is not recommended.

   How does it work:
     Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   All blocks and the block terminator start with a block_header. The block
   header contains the size of the previous and the next block. These sizes
   can also hold special values.
     Block size:
       0 - The block is a free_block, with a different size member.
       1 - The block is a block terminator.
       n - The block is in use at the moment, and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk the block chain forward or backward.
   The unused blocks are stored in a linked list headed by free_blocks. This
   list is searched for a suitable memory area whenever the allocator is
   called.

   When a block is freed, the new free block is merged with its adjacent free
   blocks if possible. For example, given the layout

     [ free block ][ used block ][ free block ]

   when "used block" is freed, the three blocks are merged into one:

     [ one big free block ]
*/
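
/*
   Illustrative sketch only (not part of the allocator): assuming the
   block_header / free_block layout defined below under "Common functions",
   a chunk can be walked forward until its block terminator like this:

     struct block_header *h = (struct block_header *)chunk_base;
     while (h->size != 1) {
         sljit_uw size = h->size ? h->size : ((struct free_block *)h)->size;
         h = AS_BLOCK_HEADER(h, size);
     }

   A used block advances by header->size; a free block stores its real size
   in the free_block size member, since header.size is 0 for free blocks.
*/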

/* --------------------------------------------------------------------- */
/*  System (OS) functions                                                 */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE	(sljit_uw)0x10000u

/*
   alloc_chunk / free_chunk :
     * allocate executable system memory chunks
     * the size is always divisible by CHUNK_SIZE
   SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
     * provided as part of sljitUtils
     * only the allocator requires this lock; sljit itself is fully thread
       safe, as it only uses local variables
*/

#ifdef _WIN32
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	SLJIT_UNUSED_ARG(size);
	VirtualFree(chunk, 0, MEM_RELEASE);
}

#else /* POSIX */

#if defined(__APPLE__) && defined(MAP_JIT)
/*
   On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a
   version where it's OK to have more than one JIT block or where MAP_JIT is
   required.
   On non-macOS systems, returns MAP_JIT if it is defined.
*/
#include <TargetConditionals.h>
#if TARGET_OS_OSX
#if defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86
#ifdef MAP_ANON
#include <sys/utsname.h>
#include <stdlib.h>

#define SLJIT_MAP_JIT	(get_map_jit_flag())

static SLJIT_INLINE int get_map_jit_flag(void)
{
	size_t page_size;
	void *ptr;
	struct utsname name;
	static int map_jit_flag = -1;

	if (map_jit_flag < 0) {
		map_jit_flag = 0;
		uname(&name);

		/* Kernel version for 10.14.0 (Mojave) or later. */
		if (atoi(name.release) >= 18) {
			page_size = get_page_alignment() + 1;
			/* Only use MAP_JIT if a hardened runtime is used. */
			ptr = mmap(NULL, page_size, PROT_WRITE | PROT_EXEC,
					MAP_PRIVATE | MAP_ANON, -1, 0);

			if (ptr != MAP_FAILED)
				munmap(ptr, page_size);
			else
				map_jit_flag = MAP_JIT;
		}
	}
	return map_jit_flag;
}
#endif /* MAP_ANON */
#else /* !SLJIT_CONFIG_X86 */
#if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
#error "Unsupported architecture"
#endif /* SLJIT_CONFIG_ARM */
#include <AvailabilityMacros.h>
#include <pthread.h>

#define SLJIT_MAP_JIT	(MAP_JIT)
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
	apple_update_wx_flags(enable_exec)

static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
{
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
	pthread_jit_write_protect_np(enable_exec);
#else
#error "Must target Big Sur or newer"
#endif /* BigSur */
}
#endif /* SLJIT_CONFIG_X86 */
#else /* !TARGET_OS_OSX */
#define SLJIT_MAP_JIT	(MAP_JIT)
#endif /* TARGET_OS_OSX */
#endif /* __APPLE__ && MAP_JIT */
#ifndef SLJIT_UPDATE_WX_FLAGS
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
#endif /* !SLJIT_UPDATE_WX_FLAGS */
#ifndef SLJIT_MAP_JIT
#define SLJIT_MAP_JIT	(0)
#endif /* !SLJIT_MAP_JIT */

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	void *retval;
	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
	int flags = MAP_PRIVATE;
	int fd = -1;

#ifdef PROT_MAX
	prot |= PROT_MAX(prot);
#endif

#ifdef MAP_ANON
	flags |= MAP_ANON | SLJIT_MAP_JIT;
#else /* !MAP_ANON */
	if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero()))
		return NULL;

	fd = dev_zero;
#endif /* MAP_ANON */

	retval = mmap(NULL, size, prot, flags, fd, 0);
	if (retval == MAP_FAILED)
		return NULL;

#ifdef __FreeBSD__
	/* HardenedBSD's mmap lies, so check permissions again. */
	if (mprotect(retval, size, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
		munmap(retval, size);
		return NULL;
	}
#endif /* FreeBSD */

	SLJIT_UPDATE_WX_FLAGS(retval, (uint8_t *)retval + size, 0);

	return retval;
}

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	munmap(chunk, size);
}

#endif /* windows */

/* --------------------------------------------------------------------- */
/*  Common functions                                                      */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK	(~(CHUNK_SIZE - 1))

struct block_header {
	sljit_uw size;
	sljit_uw prev_size;
};

struct free_block {
	struct block_header header;
	struct free_block *next;
	struct free_block *prev;
	sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
	((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
	((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base)		((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
#define ALIGN_SIZE(size)	(((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)
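/* Example (illustrative): on a 64-bit target sizeof(struct block_header)
   is 16, so ALIGN_SIZE(1) == (1 + 16 + 7) & ~7 == 24; the result always
   reserves room for the header and is a multiple of 8. */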

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

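/* Pushes free_block onto the head of the doubly linked free_blocks list
   and marks it as free by zeroing its header size. */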
static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
	free_block->header.size = 0;
	free_block->size = size;

	free_block->next = free_blocks;
	free_block->prev = NULL;
	if (free_blocks)
		free_blocks->prev = free_block;
	free_blocks = free_block;
}

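/* Unlinks free_block from the doubly linked free_blocks list. */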
static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
	if (free_block->next)
		free_block->next->prev = free_block->prev;

	if (free_block->prev)
		free_block->prev->next = free_block->next;
	else {
		SLJIT_ASSERT(free_blocks == free_block);
		free_blocks = free_block->next;
	}
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
	struct block_header *header;
	struct block_header *next_header;
	struct free_block *free_block;
	sljit_uw chunk_size;

	SLJIT_ALLOCATOR_LOCK();
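	/* Enforce a minimum allocation size, so every block is large enough
	   to hold a struct free_block when it is freed later. */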
	if (size < (64 - sizeof(struct block_header)))
		size = (64 - sizeof(struct block_header));
	size = ALIGN_SIZE(size);

	free_block = free_blocks;
	while (free_block) {
		if (free_block->size >= size) {
			chunk_size = free_block->size;
			SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
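			/* Split only if the remainder can still hold a free block
			   (hence the "+ 64" slack); otherwise hand out the whole block. */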
			if (chunk_size > size + 64) {
				/* We just cut a block from the end of the free block. */
				chunk_size -= size;
				free_block->size = chunk_size;
				header = AS_BLOCK_HEADER(free_block, chunk_size);
				header->prev_size = chunk_size;
				AS_BLOCK_HEADER(header, size)->prev_size = size;
			}
			else {
				sljit_remove_free_block(free_block);
				header = (struct block_header*)free_block;
				size = chunk_size;
			}
			allocated_size += size;
			header->size = size;
			SLJIT_ALLOCATOR_UNLOCK();
			return MEM_START(header);
		}
		free_block = free_block->next;
	}

	chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
	header = (struct block_header*)alloc_chunk(chunk_size);
	if (!header) {
		SLJIT_ALLOCATOR_UNLOCK();
		return NULL;
	}

	chunk_size -= sizeof(struct block_header);
	total_size += chunk_size;

	header->prev_size = 0;
	if (chunk_size > size + 64) {
		/* Cut the allocated space into a free and a used block. */
		allocated_size += size;
		header->size = size;
		chunk_size -= size;

		free_block = AS_FREE_BLOCK(header, size);
		free_block->header.prev_size = size;
		sljit_insert_free_block(free_block, chunk_size);
		next_header = AS_BLOCK_HEADER(free_block, chunk_size);
	}
	else {
		/* All space belongs to this allocation. */
		allocated_size += chunk_size;
		header->size = chunk_size;
		next_header = AS_BLOCK_HEADER(header, chunk_size);
	}
	next_header->size = 1;
	next_header->prev_size = chunk_size;
	SLJIT_ALLOCATOR_UNLOCK();
	return MEM_START(header);
}
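
/*
   Illustrative usage sketch (hypothetical caller; code_size and
   generated_code are assumptions, error handling elided):

     void *code = sljit_malloc_exec(code_size);
     if (code != NULL) {
         memcpy(code, generated_code, code_size);
         sljit_free_exec(code);
     }
*/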

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
	struct block_header *header;
	struct free_block* free_block;

	SLJIT_ALLOCATOR_LOCK();
	header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
	allocated_size -= header->size;

	/* Merge adjacent free blocks if possible. */
	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);

	/* If header->prev_size == 0, free_block will be equal to header.
	   In this case, free_block->header.size will be > 0. */
	free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
	if (SLJIT_UNLIKELY(!free_block->header.size)) {
		free_block->size += header->size;
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}
	else {
		free_block = (struct free_block*)header;
		sljit_insert_free_block(free_block, header->size);
	}

	header = AS_BLOCK_HEADER(free_block, free_block->size);
	if (SLJIT_UNLIKELY(!header->size)) {
		free_block->size += ((struct free_block*)header)->size;
		sljit_remove_free_block((struct free_block*)header);
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}

	/* The whole chunk is free. */
	if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
		/* Release the chunk only if more than (allocated_size / 2) free
		   space remains afterwards. */
		if (total_size - free_block->size > (allocated_size * 3 / 2)) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
	}

	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
	SLJIT_ALLOCATOR_UNLOCK();
}

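/* Releases every chunk that consists of a single free block back to the
   operating system. */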
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
	struct free_block* free_block;
	struct free_block* next_free_block;

	SLJIT_ALLOCATOR_LOCK();
	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);

	free_block = free_blocks;
	while (free_block) {
		next_free_block = free_block->next;
		if (!free_block->header.prev_size &&
				AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
		free_block = next_free_block;
	}

	SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
	SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
	SLJIT_ALLOCATOR_UNLOCK();
}
