xref: /PHP-8.3/ext/opcache/shared_alloc_mmap.c (revision 385151d2)
1 /*
2    +----------------------------------------------------------------------+
3    | Zend OPcache                                                         |
4    +----------------------------------------------------------------------+
5    | Copyright (c) The PHP Group                                          |
6    +----------------------------------------------------------------------+
7    | This source file is subject to version 3.01 of the PHP license,      |
8    | that is bundled with this package in the file LICENSE, and is        |
9    | available through the world-wide-web at the following url:           |
10    | https://www.php.net/license/3_01.txt                                 |
11    | If you did not receive a copy of the PHP license and are unable to   |
12    | obtain it through the world-wide-web, please send a note to          |
13    | license@php.net so we can mail you a copy immediately.               |
14    +----------------------------------------------------------------------+
15    | Authors: Andi Gutmans <andi@php.net>                                 |
16    |          Zeev Suraski <zeev@php.net>                                 |
17    |          Stanislav Malyshev <stas@zend.com>                          |
18    |          Dmitry Stogov <dmitry@php.net>                              |
19    +----------------------------------------------------------------------+
20 */
21 
22 #include "zend_shared_alloc.h"
23 #ifdef HAVE_JIT
24 # include "jit/zend_jit.h"
25 #endif
26 
27 #ifdef USE_MMAP
28 
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <sys/mman.h>
34 
35 #ifdef __APPLE__
36 #include <mach/vm_statistics.h>
37 #endif
38 
39 #include "zend_execute.h"
40 #ifdef HAVE_SYS_PROCCTL_H
41 #include <sys/procctl.h>
42 #endif
43 
44 #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
45 # define MAP_ANONYMOUS MAP_ANON
46 #endif
47 #if defined(MAP_ALIGNED_SUPER)
48 # include <sys/types.h>
49 # include <sys/sysctl.h>
50 # include <sys/user.h>
51 # define MAP_HUGETLB MAP_ALIGNED_SUPER
52 #endif
53 
#if defined(HAVE_JIT) && (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined (__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
/* Find a free, 2MB-aligned address range of requested_size bytes that lies
 * close to the PHP .text segment (located via the address of execute_ex), so
 * that JIT-emitted code can reach interpreter functions within a 32-bit
 * displacement (hence the UINT32_MAX distance checks below).
 *
 * The process memory map is scanned front to back: /proc/self/maps on Linux,
 * the KERN_PROC_VMMAP sysctl on FreeBSD.  Candidate bases are aligned to the
 * 2MB huge-page size so the eventual mapping can be backed by huge pages.
 *
 * Returns the chosen base address, or MAP_FAILED if no suitable hole exists. */
static void *find_prefered_mmap_base(size_t requested_size)
{
	size_t huge_page_size = 2 * 1024 * 1024;
	/* Lowest address still considered free; starts at 2MB so that address 0
	 * is never proposed. */
	uintptr_t last_free_addr = huge_page_size;
	uintptr_t last_candidate = (uintptr_t)MAP_FAILED;
	uintptr_t start, end, text_start = 0;
#if defined(__linux__)
	FILE *f;
	char buffer[MAXPATHLEN];

	f = fopen("/proc/self/maps", "r");
	if (!f) {
		return MAP_FAILED;
	}

	/* Each maps line starts with "start-end"; stop at the first line that
	 * does not parse as a hex address range. */
	while (fgets(buffer, MAXPATHLEN, f) && sscanf(buffer, "%lx-%lx", &start, &end) == 2) {
		/* Don't place the segment directly before or after the heap segment. Due to an selinux bug,
		 * a segment directly preceding or following the heap is interpreted as heap memory, which
		 * will result in an execheap violation for the JIT.
		 * See https://bugzilla.kernel.org/show_bug.cgi?id=218258. */
		bool heap_segment = strstr(buffer, "[heap]") != NULL;
		if (heap_segment) {
			uintptr_t start_base = start & ~(huge_page_size - 1);
			if (last_free_addr + requested_size >= start_base) {
				/* The hole before the heap is unusable; resume the search one
				 * huge page past the end of the heap. */
				last_free_addr = ZEND_MM_ALIGNED_SIZE_EX(end + huge_page_size, huge_page_size);
				continue;
			}
		}
		if ((uintptr_t)execute_ex >= start) {
			/* the current segment lays before PHP .text segment or PHP .text segment itself */
			/* Search for candidates at the end of the free segment near the .text segment
			 * to prevent candidates from being missed due to a large hole. */
			if (last_free_addr + requested_size <= start) {
				last_candidate = ZEND_MM_ALIGNED_SIZE_EX(start - requested_size, huge_page_size);
				if (last_candidate + requested_size > start) {
					/* aligning up overshot into the segment; step back one huge page */
					last_candidate -= huge_page_size;
				}
			}
			if ((uintptr_t)execute_ex < end) {
				/* the current segment is PHP .text segment itself */
				if (last_candidate != (uintptr_t)MAP_FAILED) {
					if (end - last_candidate < UINT32_MAX) {
						/* we have found a big enough hole before the text segment */
						break;
					}
					/* candidate is out of 32-bit reach of .text; discard it */
					last_candidate = (uintptr_t)MAP_FAILED;
				}
				text_start = start;
			}
		} else {
			/* the current segment lays after PHP .text segment */
			if (last_free_addr + requested_size - text_start > UINT32_MAX) {
				/* the current segment and the following segments lay too far from PHP .text segment */
				break;
			}
			if (last_free_addr + requested_size <= start) {
				last_candidate = last_free_addr;
				break;
			}
		}
		last_free_addr = ZEND_MM_ALIGNED_SIZE_EX(end, huge_page_size);
		if (heap_segment) {
			/* keep one huge page of clearance after the heap (see selinux note above) */
			last_free_addr += huge_page_size;
		}
	}
	fclose(f);
#elif defined(__FreeBSD__)
	size_t s = 0;
	int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
	/* First call queries the buffer size needed for the vm map snapshot. */
	if (sysctl(mib, 4, NULL, &s, NULL, 0) == 0) {
		/* Add one-third headroom: the map may grow between the two calls. */
		s = s * 4 / 3;
		void *addr = mmap(NULL, s, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
		if (addr != MAP_FAILED) {
			if (sysctl(mib, 4, addr, &s, NULL, 0) == 0) {
				start = (uintptr_t)addr;
				end = start + s;
				/* Walk the variable-length kinfo_vmentry records; the search
				 * logic mirrors the Linux branch above. */
				while (start < end) {
					struct kinfo_vmentry *entry = (struct kinfo_vmentry *)start;
					size_t sz = entry->kve_structsize;
					if (sz == 0) {
						break;
					}
					uintptr_t e_start = entry->kve_start;
					uintptr_t e_end = entry->kve_end;
					if ((uintptr_t)execute_ex >= e_start) {
						/* the current segment lays before PHP .text segment or PHP .text segment itself */
						if (last_free_addr + requested_size <= e_start) {
							last_candidate = ZEND_MM_ALIGNED_SIZE_EX(e_start - requested_size, huge_page_size);
							if (last_candidate + requested_size > e_start) {
								/* aligning up overshot into the segment; step back one huge page */
								last_candidate -= huge_page_size;
							}
						}
						if ((uintptr_t)execute_ex < e_end) {
							/* the current segment is PHP .text segment itself */
							if (last_candidate != (uintptr_t)MAP_FAILED) {
								if (e_end - last_candidate < UINT32_MAX) {
									/* we have found a big enough hole before the text segment */
									break;
								}
								/* candidate is out of 32-bit reach of .text; discard it */
								last_candidate = (uintptr_t)MAP_FAILED;
							}
							text_start = e_start;
						}
					} else {
						/* the current segment lays after PHP .text segment */
						if (last_free_addr + requested_size - text_start > UINT32_MAX) {
							/* the current segment and the following segments lay too far from PHP .text segment */
							break;
						}
						if (last_free_addr + requested_size <= e_start) {
							last_candidate = last_free_addr;
							break;
						}
					}
					last_free_addr = ZEND_MM_ALIGNED_SIZE_EX(e_end, huge_page_size);
					start += sz;
				}
			}
			munmap(addr, s);
		}
	}
#endif

	return (void*)last_candidate;
}
#endif
181 
/* Allocate the opcache shared-memory region as a single anonymous mmap()
 * segment.
 *
 * requested_size        total number of bytes to map
 * shared_segments_p     out: newly calloc'ed array holding the one segment
 * shared_segments_count out: always 1 on success
 * error_in              out: name of the failing call on ALLOC_FAILURE
 *
 * Mapping attempts, in order of preference:
 *   1. at a hint near the PHP .text segment (only when the JIT is enabled),
 *      with huge pages when requested_size allows;
 *   2. huge pages, on x86-64 optionally in the low 32-bit address space;
 *   3. a plain anonymous mapping at any address.
 *
 * Returns ALLOC_SUCCESS or ALLOC_FAILURE. */
static int create_segments(size_t requested_size, zend_shared_segment ***shared_segments_p, int *shared_segments_count, const char **error_in)
{
	zend_shared_segment *shared_segment;
	int flags = PROT_READ | PROT_WRITE, fd = -1;
	void *p;
#if defined(HAVE_PROCCTL) && defined(PROC_WXMAP_CTL)
	/* FreeBSD: ask the kernel to permit W+X mappings for this process,
	 * which the JIT needs. */
	int enable_wxmap = PROC_WX_MAPPINGS_PERMIT;
	if (procctl(P_PID, getpid(), PROC_WXMAP_CTL, &enable_wxmap) == -1) {
		return ALLOC_FAILURE;
	}
#endif
#ifdef PROT_MPROTECT
	/* NetBSD: reserve the right to later mprotect() the region executable. */
	flags |= PROT_MPROTECT(PROT_EXEC);
#endif
#ifdef VM_MAKE_TAG
	/* allows tracking segments via tools such as vmmap */
	fd = VM_MAKE_TAG(251U);
#endif
#ifdef PROT_MAX
	/* FreeBSD: cap the maximum protection the mapping may be raised to. */
	flags |= PROT_MAX(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
#if defined(HAVE_JIT) && (defined(__linux__) || defined(__FreeBSD__)) && (defined(__x86_64__) || defined (__aarch64__)) && !defined(__SANITIZE_ADDRESS__)
	void *hint;
	if (JIT_G(enabled) && JIT_G(buffer_size)
			&& zend_jit_check_support() == SUCCESS) {
		hint = find_prefered_mmap_base(requested_size);
	} else {
		/* Do not use a hint if JIT is not enabled, as this profits only JIT and
		 * this is potentially unsafe when the only suitable candidate is just
		 * after the heap (e.g. in non-PIE builds) (GH-13775). */
		hint = MAP_FAILED;
	}
	if (hint != MAP_FAILED) {
# ifdef MAP_HUGETLB
		size_t huge_page_size = 2 * 1024 * 1024;
		/* MAP_FIXED is used because the hint was chosen from a hole in the
		 * current memory map by find_prefered_mmap_base(). */
		if (requested_size >= huge_page_size && requested_size % huge_page_size == 0) {
			p = mmap(hint, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB|MAP_FIXED, -1, 0);
			if (p != MAP_FAILED) {
				goto success;
			}
		}
#endif
		p = mmap(hint, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
		if (p != MAP_FAILED) {
			goto success;
		}
	}
#endif
#ifdef MAP_HUGETLB
	size_t huge_page_size = 2 * 1024 * 1024;

	/* Try to allocate huge pages first to reduce dTLB misses.
	 * OSes has to be configured properly
	 * on Linux
	 * (e.g. https://wiki.debian.org/Hugepages#Enabling_HugeTlbPage)
	 * You may verify huge page usage with the following command:
	 * `grep "Huge" /proc/meminfo`
	 * on FreeBSD
	 * sysctl vm.pmap.pg_ps_enabled entry
	 * (boot time config only, but enabled by default on most arches).
	 */
	if (requested_size >= huge_page_size && requested_size % huge_page_size == 0) {
# if defined(__x86_64__) && defined(MAP_32BIT)
		/* to get huge pages in the low 32-bit address space we have to reserve
		   address space and then remap it using MAP_HUGETLB */

		p = mmap(NULL, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, fd, 0);
		if (p != MAP_FAILED) {
			munmap(p, requested_size);
			/* align the reserved address up to a huge-page boundary before remapping */
			p = (void*)(ZEND_MM_ALIGNED_SIZE_EX((ptrdiff_t)p, huge_page_size));
			p = mmap(p, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT|MAP_HUGETLB|MAP_FIXED, -1, 0);
			if (p != MAP_FAILED) {
				goto success;
			} else {
				/* huge-page remap failed; fall back to a regular low-32-bit mapping */
				p = mmap(NULL, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, fd, 0);
				if (p != MAP_FAILED) {
					goto success;
				}
			}
		}
# endif
		p = mmap(0, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB, fd, 0);
		if (p != MAP_FAILED) {
			goto success;
		}
	}
#elif defined(PREFER_MAP_32BIT) && defined(__x86_64__) && defined(MAP_32BIT)
	p = mmap(NULL, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, fd, 0);
	if (p != MAP_FAILED) {
		goto success;
	}
#endif

	/* last resort: plain anonymous mapping at any address */
	p = mmap(0, requested_size, flags, MAP_SHARED|MAP_ANONYMOUS, fd, 0);
	if (p == MAP_FAILED) {
		*error_in = "mmap";
		return ALLOC_FAILURE;
	}

	/* the label is unreferenced in some preprocessor configurations */
success: ZEND_ATTRIBUTE_UNUSED;
	*shared_segments_count = 1;
	/* Single allocation: one pointer-array slot followed by the segment
	 * descriptor itself. */
	*shared_segments_p = (zend_shared_segment **) calloc(1, sizeof(zend_shared_segment) + sizeof(void *));
	if (!*shared_segments_p) {
		munmap(p, requested_size);
		*error_in = "calloc";
		return ALLOC_FAILURE;
	}
	shared_segment = (zend_shared_segment *)((char *)(*shared_segments_p) + sizeof(void *));
	(*shared_segments_p)[0] = shared_segment;

	shared_segment->p = p;
	shared_segment->pos = 0;
	shared_segment->size = requested_size;

	return ALLOC_SUCCESS;
}
298 
/* Release the mapping backing a shared segment.
 * The munmap() result is deliberately ignored; this hook always reports
 * success to the shared-alloc core, as in the original implementation. */
static int detach_segment(zend_shared_segment *shared_segment)
{
	(void) munmap(shared_segment->p, shared_segment->size);
	return 0;
}
304 
segment_type_size(void)305 static size_t segment_type_size(void)
306 {
307 	return sizeof(zend_shared_segment);
308 }
309 
/* Handler table exported to the shared-alloc core for the mmap()-based
 * shared-memory backend (compiled in when USE_MMAP is defined). */
const zend_shared_memory_handlers zend_alloc_mmap_handlers = {
	create_segments,
	detach_segment,
	segment_type_size
};
315 
316 #endif /* USE_MMAP */
317