/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Sascha Schumann <sascha@schumann.cx>                        |
   |          Ard Biesheuvel <ard.biesheuvel@linaro.org>                  |
   +----------------------------------------------------------------------+
*/
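
/*
 * Overflow-checked arithmetic helpers.  This header provides:
 *
 *   - ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval): multiplies two
 *     signed longs.  On overflow it stores the product as a double in
 *     (dval) and sets (usedval) to 1; otherwise it stores the exact result
 *     in (lval) and sets (usedval) to 0.
 *
 *   - zend_safe_address() and its guarded wrappers: compute
 *     nmemb * size + offset as a size_t while detecting unsigned overflow.
 *
 * A platform-specific implementation is selected below, preferring
 * compiler builtins, then inline assembly, then portable fallbacks.
 */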

#include "zend_portability.h"

#ifndef ZEND_MULTIPLY_H
#define ZEND_MULTIPLY_H
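
/* Preferred path: the compiler's checked-multiply builtins, when configure
 * detected them (PHP_HAVE_BUILTIN_SMULL_OVERFLOW / _SMULLL_OVERFLOW) and
 * the builtin's operand width matches zend_long.  The builtin returns a
 * nonzero value exactly when the product did not fit. */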
#if PHP_HAVE_BUILTIN_SMULL_OVERFLOW && SIZEOF_LONG == SIZEOF_ZEND_LONG
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    long __tmpvar; \
    if (((usedval) = __builtin_smull_overflow((a), (b), &__tmpvar))) { \
        (dval) = (double) (a) * (double) (b); \
    } \
    else (lval) = __tmpvar; \
} while (0)
#elif PHP_HAVE_BUILTIN_SMULLL_OVERFLOW && SIZEOF_LONG_LONG == SIZEOF_ZEND_LONG
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    long long __tmpvar; \
    if (((usedval) = __builtin_smulll_overflow((a), (b), &__tmpvar))) { \
        (dval) = (double) (a) * (double) (b); \
    } \
    else (lval) = __tmpvar; \
} while (0)
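
/* x86/x86-64: the two-operand imul sets the carry and overflow flags when
 * the signed product does not fit in the destination register; "adc $0"
 * then folds that carry into usedval (seeded with 0). */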
#elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    zend_long __tmpvar; \
    __asm__ ("imul %3,%0\n" \
        "adc $0,%1" \
            : "=r"(__tmpvar), "=r"(usedval) \
            : "0"(a), "r"(b), "1"(0)); \
    if (usedval) (dval) = (double) (a) * (double) (b); \
    else (lval) = __tmpvar; \
} while (0)
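
/* 32-bit ARM: smull produces the full 64-bit signed product.  The result
 * fits in 32 bits iff the high word equals the sign extension of the low
 * word (low >> 31), so the sub leaves usedval nonzero only on overflow. */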
#elif defined(__arm__) && defined(__GNUC__)
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    zend_long __tmpvar; \
    __asm__("smull %0, %1, %2, %3\n" \
        "sub %1, %1, %0, asr #31" \
            : "=r"(__tmpvar), "=r"(usedval) \
            : "r"(a), "r"(b)); \
    if (usedval) (dval) = (double) (a) * (double) (b); \
    else (lval) = __tmpvar; \
} while (0)
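
/* AArch64: mul yields the low 64 bits and smulh the high 64 bits of the
 * 128-bit signed product; as above, overflow iff the high half differs
 * from the low half shifted right arithmetically by 63. */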
#elif defined(__aarch64__) && defined(__GNUC__)
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    zend_long __tmpvar; \
    __asm__("mul %0, %2, %3\n" \
        "smulh %1, %2, %3\n" \
        "sub %1, %1, %0, asr #63\n" \
            : "=&r"(__tmpvar), "=&r"(usedval) \
            : "r"(a), "r"(b)); \
    if (usedval) (dval) = (double) (a) * (double) (b); \
    else (lval) = __tmpvar; \
} while (0)
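
/* Windows: on x64 the _mul128 intrinsic returns the low 64 bits and stores
 * the high 64 bits of the signed product, and the same sign-extension
 * comparison detects overflow.  32-bit builds use the long double
 * comparison technique of the generic fallback at the end of this chain. */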
#elif defined(ZEND_WIN32)
# ifdef _M_X64
# pragma intrinsic(_mul128)
# define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    __int64 __high; \
    __int64 __low = _mul128((a), (b), &__high); \
    if ((__low >> 63I64) == __high) { \
        (usedval) = 0; \
        (lval) = __low; \
    } else { \
        (usedval) = 1; \
        (dval) = (double)(a) * (double)(b); \
    } \
} while (0)
# else
# define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    zend_long __lres = (a) * (b); \
    long double __dres = (long double)(a) * (long double)(b); \
    long double __delta = (long double) __lres - __dres; \
    if (((usedval) = ((__dres + __delta) != __dres))) { \
        (dval) = __dres; \
    } else { \
        (lval) = __lres; \
    } \
} while (0)
# endif
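
/* powerpc64: mulld/mulhd give the low and high halves of the 128-bit
 * signed product; overflow iff the high half is not the sign extension
 * of the low half. */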
#elif defined(__powerpc64__) && defined(__GNUC__)
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    long __low, __high; \
    __asm__("mulld %0,%2,%3\n\t" \
        "mulhd %1,%2,%3\n" \
            : "=&r"(__low), "=&r"(__high) \
            : "r"(a), "r"(b)); \
    if ((__low >> 63) != __high) { \
        (dval) = (double) (a) * (double) (b); \
        (usedval) = 1; \
    } else { \
        (lval) = __low; \
        (usedval) = 0; \
    } \
} while (0)
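
/* Generic 32-bit zend_long: widen to int64_t and range-check the result. */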
#elif SIZEOF_ZEND_LONG == 4
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    int64_t __result = (int64_t) (a) * (int64_t) (b); \
    if (__result > ZEND_LONG_MAX || __result < ZEND_LONG_MIN) { \
        (dval) = (double) __result; \
        (usedval) = 1; \
    } else { \
        (lval) = (long) __result; \
        (usedval) = 0; \
    } \
} while (0)
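
/* Portable fallback: compute the product both as a (possibly wrapped)
 * long and as a long double.  If adding the difference back to the long
 * double product changes it, the integer result wrapped, i.e. the
 * multiplication overflowed. */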
#else
#define ZEND_SIGNED_MULTIPLY_LONG(a, b, lval, dval, usedval) do { \
    long __lres = (a) * (b); \
    long double __dres = (long double)(a) * (long double)(b); \
    long double __delta = (long double) __lres - __dres; \
    if (((usedval) = ((__dres + __delta) != __dres))) { \
        (dval) = __dres; \
    } else { \
        (lval) = __lres; \
    } \
} while (0)
#endif
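
/* zend_safe_address(): nmemb * size + offset as a size_t, with *overflow
 * set on unsigned overflow.  The assembly variants widen the multiply and
 * accumulate any carry into the high half; a nonzero high half means the
 * true result does not fit in a size_t. */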
#if defined(__GNUC__) && (defined(__native_client__) || defined(i386))
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    size_t res = nmemb;
    size_t m_overflow = 0;

    if (ZEND_CONST_COND(offset == 0, 0)) {
        __asm__ ("mull %3\n\tadcl $0,%1"
             : "=&a"(res), "=&d"(m_overflow)
             : "%0"(res),
               "rm"(size));
    } else {
        __asm__ ("mull %3\n\taddl %4,%0\n\tadcl $0,%1"
             : "=&a"(res), "=&d"(m_overflow)
             : "%0"(res),
               "rm"(size),
               "rm"(offset));
    }

    if (UNEXPECTED(m_overflow)) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return res;
}
#elif defined(__GNUC__) && defined(__x86_64__)
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    size_t res = nmemb;
    zend_ulong m_overflow = 0;
#ifdef __ILP32__ /* x32 */
# define LP_SUFF "l"
#else /* amd64 */
# define LP_SUFF "q"
#endif

    if (ZEND_CONST_COND(offset == 0, 0)) {
        __asm__ ("mul" LP_SUFF " %3\n\t"
             "adc $0,%1"
             : "=&a"(res), "=&d"(m_overflow)
             : "%0"(res),
               "rm"(size));
    } else {
        __asm__ ("mul" LP_SUFF " %3\n\t"
             "add %4,%0\n\t"
             "adc $0,%1"
             : "=&a"(res), "=&d"(m_overflow)
             : "%0"(res),
               "rm"(size),
               "rm"(offset));
    }
#undef LP_SUFF

    if (UNEXPECTED(m_overflow)) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return res;
}
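
/* 32-bit ARM: umlal accumulates into the 64-bit {m_overflow:res} pair, so
 * seeding res with offset computes offset + nmemb * size in a single
 * instruction; anything left in the high word signals overflow. */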
#elif defined(__GNUC__) && defined(__arm__)
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    size_t res;
    zend_ulong m_overflow;

    __asm__ ("umlal %0,%1,%2,%3"
         : "=r"(res), "=r"(m_overflow)
         : "r"(nmemb),
           "r"(size),
           "0"(offset),
           "1"(0));

    if (UNEXPECTED(m_overflow)) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return res;
}
#elif defined(__GNUC__) && defined(__aarch64__)
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    size_t res;
    zend_ulong m_overflow;

    __asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,xzr"
         : "=&r"(res), "=&r"(m_overflow)
         : "r"(nmemb),
           "r"(size),
           "r"(offset));

    if (UNEXPECTED(m_overflow)) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return res;
}
#elif defined(__GNUC__) && defined(__powerpc64__)
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    size_t res;
    unsigned long m_overflow;

    __asm__ ("mulld %0,%2,%3\n\t"
         "mulhdu %1,%2,%3\n\t"
         "addc %0,%0,%4\n\t"
         "addze %1,%1\n"
         : "=&r"(res), "=&r"(m_overflow)
         : "r"(nmemb),
           "r"(size),
           "r"(offset));

    if (UNEXPECTED(m_overflow)) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return res;
}
#elif SIZEOF_SIZE_T == 4
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    uint64_t res = (uint64_t) nmemb * (uint64_t) size + (uint64_t) offset;

    if (UNEXPECTED(res > UINT64_C(0xFFFFFFFF))) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return (size_t) res;
}
#else
static zend_always_inline size_t zend_safe_address(size_t nmemb, size_t size, size_t offset, bool *overflow)
{
    size_t res = nmemb * size + offset;
    double _d = (double) nmemb * (double) size + (double) offset;
    double _delta = (double) res - _d;

    if (UNEXPECTED((_d + _delta) != _d)) {
        *overflow = 1;
        return 0;
    }
    *overflow = 0;
    return res;
}
#endif

static zend_always_inline size_t zend_safe_address_guarded(size_t nmemb, size_t size, size_t offset)
{
    bool overflow;
    size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

    if (UNEXPECTED(overflow)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
        return 0;
    }
    return ret;
}

/* A bit more generic version of the same */
static zend_always_inline size_t zend_safe_addmult(size_t nmemb, size_t size, size_t offset, const char *message)
{
    bool overflow;
    size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

    if (UNEXPECTED(overflow)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in %s (%zu * %zu + %zu)", message, nmemb, size, offset);
        return 0;
    }
    return ret;
}
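
/* Illustrative usage (a hypothetical caller, not code from this header):
 *
 *     zend_long a = 1000000, b = 2000000, product;
 *     double dproduct;
 *     int overflowed;
 *
 *     ZEND_SIGNED_MULTIPLY_LONG(a, b, product, dproduct, overflowed);
 *     if (overflowed) {
 *         // too big for zend_long; dproduct holds the value as a double
 *     }
 *
 *     // Allocation sizing; zend_error_noreturn() fires on overflow:
 *     void *p = emalloc(zend_safe_address_guarded(nmemb, sizeof(zval), 0));
 */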

#endif /* ZEND_MULTIPLY_H */