/* crc64_x86.S */
/*
 * Speed-optimized CRC64 using slicing-by-four algorithm
 *
 * This uses only i386 instructions, but it is optimized for i686 and later
 * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
 *
 * Authors:     Igor Pavlov (original CRC32 assembly code)
 *              Lasse Collin (CRC64 adaptation of the modified CRC32 code)
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 *
 * This code needs lzma_crc64_table, which can be created using the
 * following C code:

uint64_t lzma_crc64_table[4][256];

void
init_table(void)
{
	// ECMA-182
	static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);

	for (size_t s = 0; s < 4; ++s) {
		for (size_t b = 0; b < 256; ++b) {
			uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];

			for (size_t i = 0; i < 8; ++i) {
				if (r & 1)
					r = (r >> 1) ^ poly64;
				else
					r >>= 1;
			}

			lzma_crc64_table[s][b] = r;
		}
	}
}

 * The prototype of the CRC64 function:
 * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
 */
/*
 * On some systems, the functions need to be prefixed. The prefix is
 * usually an underscore.
 */
#ifndef __USER_LABEL_PREFIX__
#	define __USER_LABEL_PREFIX__
#endif

/*
 * Two macro levels are needed so that __USER_LABEL_PREFIX__ is expanded
 * before the ## token pasting takes place.
 */
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)

/* Exported symbol names with the platform's label prefix applied. */
#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)

/*
 * Solaris assembler doesn't have .p2align, and Darwin uses .align
 * differently than GNU/Linux and Solaris.
 *
 * ALIGN(pow2, abs): pow2 is the power-of-two exponent (Darwin/DOS style),
 * abs is the same alignment expressed in bytes (GNU/Solaris style).
 */
#if defined(__APPLE__) || defined(__MSDOS__)
#	define ALIGN(pow2, abs) .align pow2
#else
#	define ALIGN(pow2, abs) .align abs
#endif
	.text
	.globl	LZMA_CRC64

#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
		&& !defined(__MSDOS__)
	.type	LZMA_CRC64, @function
#endif

	ALIGN(4, 16)

/*
 * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
 *
 * i386 cdecl: all arguments are on the stack. After the four register
 * pushes below, they are at 0x14(%esp) (buf), 0x18(%esp) (size), and
 * 0x1C/0x20(%esp) (crc low/high 32 bits). The 64-bit result is returned
 * in %edx:%eax.
 */
LZMA_CRC64:
	/*
	 * Register usage:
	 * %eax crc LSB
	 * %edx crc MSB
	 * %esi buf
	 * %edi size or buf + size
	 * %ebx lzma_crc64_table
	 * %ebp Table index
	 * %ecx Temporary
	 */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	movl	0x14(%esp), %esi	/* buf */
	movl	0x18(%esp), %edi	/* size */
	movl	0x1C(%esp), %eax	/* crc LSB */
	movl	0x20(%esp), %edx	/* crc MSB */

	/*
	 * Store the address of lzma_crc64_table to %ebx. This is needed to
	 * get position-independent code (PIC).
	 *
	 * The PIC macro is defined by libtool, while __PIC__ is defined
	 * by GCC but only on some systems. Testing for both makes it simpler
	 * to test this code without libtool, and keeps the code working also
	 * when built with libtool but using something else than GCC.
	 *
	 * I understood that libtool may define PIC on Windows even though
	 * the code in Windows DLLs is not PIC in sense that it is in ELF
	 * binaries, so we need a separate check to always use the non-PIC
	 * code on Windows.
	 */
#if (!defined(PIC) && !defined(__PIC__)) \
		|| (defined(_WIN32) || defined(__CYGWIN__))
	/* Not PIC: the table address is a link-time absolute. */
	movl	$ LZMA_CRC64_TABLE, %ebx
#elif defined(__APPLE__)
	/* Mach-O: go through the non-lazy symbol pointer defined below. */
	call	.L_get_pc
.L_pic:
	leal	.L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
	movl	(%ebx), %ebx
#else
	/* ELF: load the table address from the GOT. */
	call	.L_get_pc
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	movl	LZMA_CRC64_TABLE@GOT(%ebx), %ebx
#endif

	/* Complement the initial value. */
	notl	%eax
	notl	%edx

.L_align:
	/*
	 * Check if there is enough input to use slicing-by-four.
	 * We need eight bytes, because the loop pre-reads four bytes.
	 */
	cmpl	$8, %edi
	jb	.L_rest

	/* Check if we have reached alignment of four bytes. */
	testl	$3, %esi
	jz	.L_slice

	/* Calculate CRC of the next input byte (byte-at-a-time). */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax		/* 64-bit crc >>= 8: low half, bits shifted in from %edx */
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx		/* 64-bit crc >>= 8: high half */
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_align

.L_slice:
	/*
	 * If we get here, there's at least eight bytes of aligned input
	 * available. Make %edi multiple of four bytes. Store the possible
	 * remainder over the "size" variable in the argument stack.
	 */
	movl	%edi, 0x18(%esp)
	andl	$-4, %edi
	subl	%edi, 0x18(%esp)

	/*
	 * Let %edi be buf + size - 4 while running the main loop. This way
	 * we can compare for equality to determine when exit the loop.
	 */
	addl	%esi, %edi
	subl	$4, %edi

	/* Read in the first four aligned bytes. */
	movl	(%esi), %ecx

.L_loop:
	/*
	 * Slicing-by-four: XOR the low 32 crc bits into the four input
	 * bytes, then combine one lookup per byte. The four 256-entry
	 * tables of 8-byte entries sit at offsets 0x1800, 0x1000, 0x0800,
	 * and 0 from %ebx; each entry stores the low word at +0 and the
	 * high word at +4 (little endian).
	 */
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Check for end of aligned input. */
	cmpl	%edi, %esi

	/*
	 * Copy the next input byte to %ecx. It is slightly faster to
	 * read it here than at the top of the loop.
	 */
	movl	(%esi), %ecx
	jb	.L_loop

	/*
	 * Process the remaining four bytes, which we have already
	 * copied to %ecx. (Same table combination as in .L_loop.)
	 */
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Copy the number of remaining bytes to %edi. */
	movl	0x18(%esp), %edi

.L_rest:
	/* Check for end of input. */
	testl	%edi, %edi
	jz	.L_return

	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_rest

.L_return:
	/* Complement the final value. */
	notl	%eax
	notl	%edx

	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret

#if defined(PIC) || defined(__PIC__)
	ALIGN(4, 16)
.L_get_pc:
	/* Return the caller's own address in %ebx (classic i386 PIC thunk). */
	movl	(%esp), %ebx
	ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
	/* Mach-O PIC: indirect pointer through which the table is reached. */
	.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc64_table$non_lazy_ptr:
	.indirect_symbol LZMA_CRC64_TABLE
	.long 0

#elif defined(_WIN32) || defined(__CYGWIN__)
#	ifdef DLL_EXPORT
	/* This is equivalent of __declspec(dllexport). */
	.section .drectve
	.ascii " -export:lzma_crc64"
#	endif

#elif !defined(__MSDOS__)
	/* ELF: record the function's size for tools like objdump/gdb. */
	.size	LZMA_CRC64, .-LZMA_CRC64
#endif

/*
 * This is needed to support non-executable stack. It's ugly to
 * use __linux__ here, but I don't know a way to detect when
 * we are using GNU assembler.
 */
#if defined(__ELF__) && defined(__linux__)
	.section	.note.GNU-stack,"",@progbits
#endif