/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) 1998-2018 Zend Technologies Ltd. (http://www.zend.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Sascha Schumann <sascha@schumann.cx>                        |
   |          Ard Biesheuvel <ard.biesheuvel@linaro.org>                  |
   +----------------------------------------------------------------------+
*/

/* $Id$ */

#include "zend_portability.h"

#ifndef ZEND_MULTIPLY_H
#define ZEND_MULTIPLY_H

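/*
 * ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) multiplies two
 * signed zend_longs.  When the product fits in a zend_long it is stored
 * in lval and usedval is set to 0; on overflow usedval is set to 1 and
 * the product is stored in dval as a double instead.  A minimal usage
 * sketch (variable names are illustrative only):
 *
 *	zend_long lv; double dv; int used;
 *	ZEND_SIGNED_MULTIPLY_LONG(x, y, lv, dv, used);
 *	if (used) {
 *		... continue with the double result in dv ...
 *	} else {
 *		... integer result in lv ...
 *	}
 */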
#if PHP_HAVE_BUILTIN_SMULL_OVERFLOW && SIZEOF_LONG == SIZEOF_ZEND_LONG

#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long __tmpvar;													\
	if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) {	\
		(dval) = (double) (a) * (double) (b);						\
	}																\
	else (lval) = __tmpvar;											\
} while (0)

#elif PHP_HAVE_BUILTIN_SMULLL_OVERFLOW && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG

#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long long __tmpvar;												\
	if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) {	\
		(dval) = (double) (a) * (double) (b);						\
	}																\
	else (lval) = __tmpvar;											\
} while (0)

#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)

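/*
 * Two-operand IMUL sets the carry flag when the signed result is
 * truncated; the ADC that follows adds that carry into usedval, which
 * enters the asm preloaded with 0 via the "1"(0) constraint.
 */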
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long __tmpvar;												\
	__asm__ ("imul %3,%0\n"											\
		"adc $0,%1"													\
			: "=r"(__tmpvar),"=r"(usedval)							\
			: "0"(a), "r"(b), "1"(0));								\
	if (usedval) (dval) = (double) (a) * (double) (b);				\
	else (lval) = __tmpvar;											\
} while (0)

#elif defined(__arm__) && defined(__GNUC__)

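/*
 * SMULL leaves the full 64-bit signed product in two registers.  The
 * high word equals the sign extension of the low word (low asr #31)
 * exactly when the product fits in 32 bits, so the SUB leaves
 * usedval == 0 iff there was no overflow.
 */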
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long __tmpvar;												\
	__asm__("smull %0, %1, %2, %3\n"								\
		"sub %1, %1, %0, asr #31"									\
			: "=r"(__tmpvar), "=r"(usedval)							\
			: "r"(a), "r"(b));										\
	if (usedval) (dval) = (double) (a) * (double) (b);				\
	else (lval) = __tmpvar;											\
} while (0)

#elif defined(__aarch64__) && defined(__GNUC__)

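/*
 * Same idea as the 32-bit ARM variant: MUL yields the low 64 bits and
 * SMULH the high 64 bits of the signed product, and the high half must
 * equal low >> 63 (arithmetic) iff the product fits in 64 bits.
 */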
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long __tmpvar;												\
	__asm__("mul %0, %2, %3\n"										\
		"smulh %1, %2, %3\n"										\
		"sub %1, %1, %0, asr #63\n"									\
			: "=&r"(__tmpvar), "=&r"(usedval)						\
			: "r"(a), "r"(b));										\
	if (usedval) (dval) = (double) (a) * (double) (b);				\
	else (lval) = __tmpvar;											\
} while (0)

#elif defined(ZEND_WIN32)

# ifdef _M_X64
#  pragma intrinsic(_mul128)
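/*
 * _mul128() returns the low 64 bits of the signed product and stores
 * the high 64 bits through its last argument; the product fits in
 * 64 bits iff the high half equals the sign extension of the low half.
 */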
#  define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	__int64 __high; \
	__int64 __low = _mul128((a), (b), &__high); \
	if ((__low >> 63I64) == __high) { \
		(usedval) = 0; \
		(lval) = __low; \
	} else { \
		(usedval) = 1; \
		(dval) = (double)(a) * (double)(b); \
	} \
} while (0)
# else
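/*
 * Long double trick: compute the product both as a (possibly wrapped)
 * zend_long and in long double.  If the integer multiplication wrapped,
 * __lres is off by a multiple of 2^32, so __delta is large enough to
 * perturb __dres; otherwise __delta is at most a rounding error and
 * __dres + __delta rounds back to __dres.
 */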
#  define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	zend_long   __lres  = (a) * (b);								\
	long double __dres  = (long double)(a) * (long double)(b);		\
	long double __delta = (long double) __lres - __dres;			\
	if (((usedval) = ((__dres + __delta) != __dres))) {				\
		(dval) = __dres;											\
	} else {														\
		(lval) = __lres;											\
	}																\
} while (0)
# endif

#elif defined(__powerpc64__) && defined(__GNUC__)

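/*
 * MULLD yields the low 64 bits and MULHD the high 64 bits of the
 * signed product; as on AArch64, the high half must equal the sign
 * extension of the low half iff the product fits in 64 bits.
 */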
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long __low, __high;						\
	__asm__("mulld %0,%2,%3\n\t"					\
		"mulhd %1,%2,%3\n"					\
		: "=&r"(__low), "=&r"(__high)				\
		: "r"(a), "r"(b));					\
	if ((__low >> 63) != __high) {					\
		(dval) = (double) (a) * (double) (b);			\
		(usedval) = 1;						\
	} else {							\
		(lval) = __low;						\
		(usedval) = 0;						\
	}								\
} while (0)

#elif SIZEOF_ZEND_LONG == 4

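/*
 * With 32-bit zend_longs two factors always fit in an int64_t, so the
 * product can be computed exactly in 64-bit arithmetic and range-checked.
 */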
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	int64_t __result = (int64_t) (a) * (int64_t) (b);				\
	if (__result > ZEND_LONG_MAX || __result < ZEND_LONG_MIN) {		\
		(dval) = (double) __result;									\
		(usedval) = 1;												\
	} else {														\
		(lval) = (long) __result;									\
		(usedval) = 0;												\
	}																\
} while (0)

#else

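/* Portable fallback: the same long double trick as the Win32 branch above. */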
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do {	\
	long   __lres  = (a) * (b);										\
	long double __dres  = (long double)(a) * (long double)(b);		\
	long double __delta = (long double) __lres - __dres;			\
	if (((usedval) = ((__dres + __delta) != __dres))) {				\
		(dval) = __dres;											\
	} else {														\
		(lval) = __lres;											\
	}																\
} while (0)

#endif

#if defined(__GNUC__) && (defined(__native_client__) || defined(i386))

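/*
 * zend_safe_address() computes nmemb * size + offset.  On success the
 * result is returned and *overflow is set to 0; if the value does not
 * fit in a size_t, 0 is returned and *overflow is set to 1.
 *
 * On i386, MULL leaves the 64-bit unsigned product in EDX:EAX; the
 * offset is added into EAX and the carry propagated into EDX, so a
 * nonzero EDX signals overflow.
 */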
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res = nmemb;
	size_t m_overflow = 0;

	__asm__ ("mull %3\n\taddl %4,%0\n\tadcl $0,%1"
	     : "=&a"(res), "=&d" (m_overflow)
	     : "%0"(res),
	       "rm"(size),
	       "rm"(offset));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__x86_64__)

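/* Same technique as the i386 version, using the full-width MUL (MULQ on amd64, MULL on x32). */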
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res = nmemb;
	zend_ulong m_overflow = 0;

#ifdef __ILP32__ /* x32 */
# define LP_SUFF "l"
#else /* amd64 */
# define LP_SUFF "q"
#endif

	__asm__ ("mul" LP_SUFF  " %3\n\t"
		"add %4,%0\n\t"
		"adc $0,%1"
		: "=&a"(res), "=&d" (m_overflow)
		: "%0"(res),
		  "rm"(size),
		  "rm"(offset));

#undef LP_SUFF
	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__arm__)

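/*
 * UMLAL accumulates the 64-bit unsigned product of nmemb and size into
 * the register pair {m_overflow:res}, which enters the asm preloaded
 * with {0:offset}; a nonzero high word means the result overflowed 32 bits.
 */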
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res;
	zend_ulong m_overflow;

	__asm__ ("umlal %0,%1,%2,%3"
		: "=r"(res), "=r"(m_overflow)
		: "r"(nmemb),
		  "r"(size),
		  "0"(offset),
		  "1"(0));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__aarch64__)

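/*
 * MUL/UMULH form the 128-bit unsigned product; ADDS/ADC then add the
 * offset and propagate the carry into the high half, so a nonzero high
 * half signals overflow.
 */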
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res;
	zend_ulong m_overflow;

	__asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,xzr"
		: "=&r"(res), "=&r"(m_overflow)
		: "r"(nmemb),
		  "r"(size),
		  "r"(offset));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif defined(__GNUC__) && defined(__powerpc64__)

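/* Same approach on POWER: MULLD/MULHDU form the 128-bit product, then ADDC/ADDZE fold in the offset and carry. */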
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res;
	unsigned long m_overflow;

	__asm__ ("mulld %0,%2,%3\n\t"
		 "mulhdu %1,%2,%3\n\t"
		 "addc %0,%0,%4\n\t"
		 "addze %1,%1\n"
	     : "=&r"(res), "=&r"(m_overflow)
	     : "r"(nmemb),
	       "r"(size),
	       "r"(offset));

	if (UNEXPECTED(m_overflow)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}

#elif SIZEOF_SIZE_T == 4

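/* With a 32-bit size_t the whole computation fits in uint64_t and can be range-checked directly. */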
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	uint64_t res = (uint64_t) nmemb * (uint64_t) size + (uint64_t) offset;

	if (UNEXPECTED(res > UINT64_C(0xFFFFFFFF))) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return (size_t) res;
}

#else

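/* Portable fallback: the same floating-point delta trick as above, in double precision. */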
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, int *overflow)
{
	size_t res = nmemb * size + offset;
	double _d  = (double)nmemb * (double)size + (double)offset;
	double _delta = (double)res - _d;

	if (UNEXPECTED((_d + _delta) != _d)) {
		*overflow = 1;
		return 0;
	}
	*overflow = 0;
	return res;
}
#endif

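/*
 * Guarded variant: instead of reporting overflow through a flag, abort
 * the request with a fatal error when the computation would overflow.
 */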
static zend_always_inline size_t zend_safe_address_guarded(size_t nmemb, size_t size, size_t offset)
{
	int overflow;
	size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

	if (UNEXPECTED(overflow)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
		return 0;
	}
	return ret;
}

/* A slightly more generic version of the same, with a caller-supplied context for the error message */
static zend_always_inline size_t zend_safe_addmult(size_t nmemb, size_t size, size_t offset, const char *message)
{
	int overflow;
	size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

	if (UNEXPECTED(overflow)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in %s (%zu * %zu + %zu)", message, nmemb, size, offset);
		return 0;
	}
	return ret;
}
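/*
 * Typical allocation pattern (a sketch; "my_elem" is illustrative only):
 *
 *	struct my_elem *p = emalloc(zend_safe_address_guarded(n, sizeof(*p), 0));
 *
 * zend_safe_addmult() behaves the same but lets the caller name the
 * operation in the error message.
 */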

#endif /* ZEND_MULTIPLY_H */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * indent-tabs-mode: t
 * End:
 * vim600: sw=4 ts=4 fdm=marker
 * vim<600: sw=4 ts=4
 */