/* xref: /PHP-7.4/Zend/zend_multiply.h (revision 92ac598a) */
/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Sascha Schumann <sascha@schumann.cx>                        |
   |          Ard Biesheuvel <ard.biesheuvel@linaro.org>                  |
   +----------------------------------------------------------------------+
*/

#include "zend_portability.h"

#ifndef ZEND_MULTIPLY_H
#define ZEND_MULTIPLY_H

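/*
 * ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval)
 *
 * Multiplies the signed operands a and b. If the product fits in a
 * zend_long, it is stored in lval and usedval is set to 0; on overflow
 * the product is stored as a double in dval and usedval is set to 1.
 * The branches below pick, in order of preference, compiler overflow
 * builtins, per-architecture inline assembly, and portable fallbacks.
 */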
#if PHP_HAVE_BUILTIN_SMULL_OVERFLOW && SIZEOF_LONG == SIZEOF_ZEND_LONG

#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long __tmpvar;													\
	if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) {	\
		(dval) = (double) (a) * (double) (b);						\
	}																\
	else (lval) = __tmpvar;											\
} while (0)

#elif PHP_HAVE_BUILTIN_SMULLL_OVERFLOW && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG

#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long long __tmpvar;												\
	if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) {	\
		(dval) = (double) (a) * (double) (b);						\
	}																\
	else (lval) = __tmpvar;											\
} while (0)

#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)

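/*
 * imul sets the carry and overflow flags when the signed result does not
 * fit in the destination register; "adc $0,%1" then adds that carry into
 * usedval (seeded with 0 via the "1"(0) constraint), so usedval is
 * nonzero exactly when the multiplication overflowed.
 */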
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long __tmpvar;												\
	__asm__ ("imul %3,%0\n"											\
		"adc $0,%1" 												\
			: "=r"(__tmpvar),"=r"(usedval) 							\
			: "0"(a), "r"(b), "1"(0));								\
	if (usedval) (dval) = (double) (a) * (double) (b);				\
	else (lval) = __tmpvar;											\
} while (0)

#elif defined(__arm__) && defined(__GNUC__)

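/*
 * smull produces the full 64-bit signed product (low word in %0, high
 * word in %1). The product fits in 32 bits exactly when the high word
 * equals the sign extension of the low word, i.e. low >> 31; the sub
 * therefore leaves usedval at 0 on success and nonzero on overflow.
 */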
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long __tmpvar;												\
	__asm__("smull %0, %1, %2, %3\n"								\
		"sub %1, %1, %0, asr #31"									\
			: "=r"(__tmpvar), "=r"(usedval)							\
			: "r"(a), "r"(b));										\
	if (usedval) (dval) = (double) (a) * (double) (b);				\
	else (lval) = __tmpvar;											\
} while (0)

#elif defined(__aarch64__) && defined(__GNUC__)

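/*
 * mul yields the low 64 bits of the product and smulh the high 64 bits.
 * As in the 32-bit ARM case, the result fits in a zend_long exactly when
 * the high half equals low >> 63, so the sub leaves usedval at 0 only
 * when there was no overflow.
 */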
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long __tmpvar;												\
	__asm__("mul %0, %2, %3\n"										\
		"smulh %1, %2, %3\n"										\
		"sub %1, %1, %0, asr #63\n"									\
			: "=&r"(__tmpvar), "=&r"(usedval)						\
			: "r"(a), "r"(b));										\
	if (usedval) (dval) = (double) (a) * (double) (b);				\
	else (lval) = __tmpvar;											\
} while (0)

#elif defined(ZEND_WIN32)

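/*
 * On x64 (_M_X64), the _mul128 intrinsic returns the low 64 bits of the
 * product and stores the high 64 bits through its third argument. The
 * signed result fits in 64 bits exactly when the high half equals the
 * sign extension of the low half (__low >> 63).
 */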
# ifdef _M_X64
#  pragma intrinsic(_mul128)
#  define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {       \
	__int64 __high; \
	__int64 __low = _mul128((a), (b), &__high); \
	if ((__low >> 63I64) == __high) { \
		(usedval) = 0; \
		(lval) = __low; \
	} else { \
		(usedval) = 1; \
		(dval) = (double)(a) * (double)(b); \
	} \
} while (0)
# else
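/*
 * 32-bit Windows fallback: compute the product both as a (possibly
 * wrapped) long and in long double. When there is no overflow the two
 * agree, __delta is 0, and __dres + __delta == __dres. When the long
 * result wrapped, __delta is large (on the order of 2^31 or more), so
 * adding it visibly changes __dres.
 */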
#  define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long   __lres  = (a) * (b);								\
	long double __dres  = (long double)(a) * (long double)(b);		\
	long double __delta = (long double) __lres - __dres;			\
	if ( ((usedval) = (( __dres + __delta ) != __dres))) {			\
		(dval) = __dres;											\
	} else {														\
		(lval) = __lres;											\
	}																\
} while (0)
# endif

#elif defined(__powerpc64__) && defined(__GNUC__)

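/*
 * mulld yields the low 64 bits of the product and mulhd the high 64
 * bits; as on AArch64, the signed result fits exactly when the high
 * half equals the sign extension of the low half (__low >> 63).
 */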
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long __low, __high;						\
	__asm__("mulld %0,%2,%3\n\t"					\
		"mulhd %1,%2,%3\n"					\
		: "=&r"(__low), "=&r"(__high)				\
		: "r"(a), "r"(b));					\
	if ((__low >> 63) != __high) {					\
		(dval) = (double) (a) * (double) (b);			\
		(usedval) = 1;						\
	} else {							\
		(lval) = __low;						\
		(usedval) = 0;						\
	}								\
} while (0)

#elif SIZEOF_ZEND_LONG == 4

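/*
 * Portable 32-bit case: do the multiplication in 64 bits and range-check
 * the result against the zend_long limits.
 */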
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	int64_t __result = (int64_t) (a) * (int64_t) (b);				\
	if (__result > ZEND_LONG_MAX || __result < ZEND_LONG_MIN) {		\
		(dval) = (double) __result;									\
		(usedval) = 1;												\
	} else {														\
		(lval) = (long) __result;									\
		(usedval) = 0;												\
	}																\
} while (0)

#else

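/*
 * Generic fallback: the same long double comparison trick as in the
 * 32-bit Windows branch above.
 */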
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long   __lres  = (a) * (b);										\
	long double __dres  = (long double)(a) * (long double)(b);		\
	long double __delta = (long double) __lres - __dres;			\
	if ( ((usedval) = (( __dres + __delta ) != __dres))) {			\
		(dval) = __dres;											\
	} else {														\
		(lval) = __lres;											\
	}																\
} while (0)

#endif
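
/*
 * Illustrative usage (a sketch, not part of the original header; x and y
 * stand for arbitrary zend_long operands):
 *
 *     zend_long lval; double dval; int usedval;
 *     ZEND_SIGNED_MULTIPLY_LONG(x, y, lval, dval, usedval);
 *     if (usedval) {
 *         // overflow: the product is only available as the double dval
 *     } else {
 *         // no overflow: lval holds the exact product
 *     }
 */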
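/*
 * zend_safe_address() computes nmemb * size + offset as a size_t. On
 * overflow it sets *overflow to 1 and returns 0; otherwise it sets
 * *overflow to 0 and returns the exact result. It backs the guarded
 * allocation-size helpers at the end of this header.
 */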
#if defined(__GNUC__) && (defined(__native_client__) || defined(i386))

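/*
 * mull leaves the 64-bit product in edx:eax. The optional addl of offset
 * may carry, and "adcl $0,%1" folds that carry into the high word, so a
 * nonzero edx means the full result does not fit in 32 bits.
 */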
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res = nmemb;
	size_t m_overflow = 0;

	if (ZEND_CONST_COND(offset == 0, 0)) {
		__asm__ ("mull %3\n\tadcl $0,%1"
	     : "=&a"(res), "=&d" (m_overflow)
	     : "%0"(res),
	       "rm"(size));
	} else {
		__asm__ ("mull %3\n\taddl %4,%0\n\tadcl $0,%1"
	     : "=&a"(res), "=&d" (m_overflow)
	     : "%0"(res),
	       "rm"(size),
	       "rm"(offset));
	}

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__x86_64__)

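/*
 * Same scheme as the 32-bit variant, with the operand-size suffix chosen
 * at compile time: "l" (32-bit) on the x32 ABI, "q" (64-bit) on amd64.
 */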
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res = nmemb;
	zend_ulong m_overflow = 0;

#ifdef __ILP32__ /* x32 */
# define LP_SUFF "l"
#else /* amd64 */
# define LP_SUFF "q"
#endif

	if (ZEND_CONST_COND(offset == 0, 0)) {
		__asm__ ("mul" LP_SUFF  " %3\n\t"
			"adc $0,%1"
			: "=&a"(res), "=&d" (m_overflow)
			: "%0"(res),
			  "rm"(size));
	} else {
		__asm__ ("mul" LP_SUFF  " %3\n\t"
			"add %4,%0\n\t"
			"adc $0,%1"
			: "=&a"(res), "=&d" (m_overflow)
			: "%0"(res),
			  "rm"(size),
			  "rm"(offset));
	}
#undef LP_SUFF
	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__arm__)

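/*
 * umlal performs a 64-bit multiply-accumulate: {m_overflow:res} is seeded
 * with {0:offset} via the "1"(0) and "0"(offset) constraints, and
 * nmemb * size is added to it. A nonzero high word means the sum does
 * not fit in 32 bits.
 */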
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res;
	zend_ulong m_overflow;

	__asm__ ("umlal %0,%1,%2,%3"
		: "=r"(res), "=r"(m_overflow)
		: "r"(nmemb),
		  "r"(size),
		  "0"(offset),
		  "1"(0));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__aarch64__)

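/*
 * mul/umulh produce the full 128-bit product; adds adds offset to the
 * low half and sets the carry flag, which adc then folds into the high
 * half. A nonzero high half means the result does not fit in 64 bits.
 */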
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res;
	zend_ulong m_overflow;

	__asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,xzr"
		: "=&r"(res), "=&r"(m_overflow)
		: "r"(nmemb),
		  "r"(size),
		  "r"(offset));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__powerpc64__)

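/*
 * mulld/mulhdu produce the full 128-bit product; addc adds offset to the
 * low half and addze folds the resulting carry into the high half, so a
 * nonzero high half again signals overflow.
 */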
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res;
	unsigned long m_overflow;

	__asm__ ("mulld %0,%2,%3\n\t"
		 "mulhdu %1,%2,%3\n\t"
		 "addc %0,%0,%4\n\t"
		 "addze %1,%1\n"
	     : "=&r"(res), "=&r"(m_overflow)
	     : "r"(nmemb),
	       "r"(size),
	       "r"(offset));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif SIZEOF_SIZE_T == 4

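/*
 * Portable 32-bit case: compute nmemb * size + offset in 64 bits and
 * reject anything that does not fit back into 32 bits.
 */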
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	uint64_t res = (uint64_t) nmemb * (uint64_t) size + (uint64_t) offset;

	if (UNEXPECTED(res > UINT64_C(0xFFFFFFFF))) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return (size_t) res;
}

#else

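/*
 * Generic fallback: the same floating-point comparison trick as the
 * portable ZEND_SIGNED_MULTIPLY_LONG above. If the integer result
 * wrapped, it differs from the floating-point result by enough that
 * adding the difference changes the double; otherwise the difference is
 * zero or negligible.
 */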
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res = nmemb * size + offset;
	double _d  = (double)nmemb * (double)size + (double)offset;
	double _delta = (double)res - _d;

	if (UNEXPECTED((_d + _delta ) != _d)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}
#endif

static zend_always_inline size_t zend_safe_address_guarded(size_t nmemb, size_t size, size_t offset)
{
	int overflow;
	size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

	if (UNEXPECTED(overflow)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
		return 0;
	}
	return ret;
}

/* A bit more generic version of the same */
static zend_always_inline size_t zend_safe_addmult(size_t nmemb, size_t size, size_t offset, const char *message)
{
	int overflow;
	size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

	if (UNEXPECTED(overflow)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in %s (%zu * %zu + %zu)", message, nmemb, size, offset);
		return 0;
	}
	return ret;
}

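/*
 * Illustrative usage (a sketch, not part of the original header; `count`
 * and `struct foo` are hypothetical): size an allocation of `count`
 * elements plus a fixed header, bailing out with E_ERROR on overflow.
 *
 *     size_t n = zend_safe_addmult(count, sizeof(struct foo),
 *                                  sizeof(size_t), "foo buffer");
 *     struct foo *p = emalloc(n);
 */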
#endif /* ZEND_MULTIPLY_H */