#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the amount of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's fired only for longer inputs,
# but not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# P4		41	38	28		-	17.3
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
# Silvermont	40	-	34/31		22.9	20.6
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;
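#
# For reference, a typical standalone invocation looks like the line
# below (illustrative only; the flavour tag and extra defines depend on
# the build target, and in this generation of perlasm the generated
# assembly is written to stdout):
#
#	perl sha256-586.pl elf -DOPENSSL_IA32_SSE2 > sha256-586.S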

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
		`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

$shaext=$xmm;		### set to zero if compiling for 1.0.1

$unroll_after = 64*4;	# If pre-evicted from the L1P cache, the first
			# spin of the fully unrolled loop was measured
			# to run about 3-4x slower. If the slowdown
			# coefficient is N and the unrolled loop is m
			# times faster, you break even at (N-1)/(m-1)
			# blocks. That then has to be adjusted for the
			# probability of the code being evicted,
			# code size/cache size = 1/4. Typical m is 1.15...
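			#
			# Worked example with the numbers above: taking
			# N~3.5 and m~1.15, (N-1)/(m-1) ~ 2.5/0.15 ~ 17
			# blocks; scaled by the 1/4 eviction probability
			# that is ~4 blocks, i.e. 64*4 bytes of input.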

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";
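
# Note on the "alternative Maj" trick used throughout: Maj(a,b,c) is
# computed as Ch(a^b,c,b) = ((a^b)&(b^c))^b. Since b^c of the current
# round equals a^b of the previous one, that value is carried over on
# the stack (the lines marked "magic"), and each round pays only one
# xor/and/xor chain for Maj.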

sub BODY_16_63() {
	&mov	($T,"ecx");			# "ecx" is preloaded
	&mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&ror	("ecx",18-7);
	&mov	("edi","esi");
	&ror	("esi",19-17);
	&xor	("ecx",$T);
	&shr	($T,3);
	&ror	("ecx",7);
	&xor	("esi","edi");
	&xor	($T,"ecx");			# T = sigma0(X[-15])
	&ror	("esi",17);
	&add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&shr	("edi",10);
	&add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&BODY_00_15(1);
}

sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	("ecx",$E);
	&xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	&mov	("esi",$Foff);
	&ror	("ecx",25-11);
	&add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	&mov	("edi",$Goff);
	&xor	("ecx",$E);
	&xor	("esi","edi");
	&mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	&mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&ror	("ecx",11-6);
	&and	("esi",$E);
	&mov	($Eoff,$E);		# modulo-scheduled
	&xor	($E,"ecx");
	&add	($T,$Hoff);		# T += h
	&xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	&mov	("ecx",$A);
	&add	($T,"esi");		# T += Ch(e,f,g)

	&ror	("ecx",22-13);
	&add	($T,$E);		# T += Sigma1(e)
	&mov	("edi",$Boff);
	&xor	("ecx",$A);
	&mov	($Aoff,$A);		# modulo-scheduled
	&lea	("esp",&DWP(-4,"esp"));
	&ror	("ecx",13-2);
	&mov	("esi",&DWP(0,$K256));
	&xor	("ecx",$A);
	&mov	($E,$Eoff);		# e in next iteration, d in this one
	&xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)
	&add	($T,"esi");		# T += K[i]
	&mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	&and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	&xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($K256,4);
	&add	($A,$T);		# h += T
}

&external_label("OPENSSL_ia32cap_P")	if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
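
	# Feature dispatch summary for the checks below: P4 (bit 20)
	# always takes the compact integer loop; without FXSR (bit 24)
	# no XMM path is usable; then SHA Extensions, AVX(+BMI) and
	# SSSE3 are tried in that order, and everything else falls
	# through to the fully unrolled loop once the input is at least
	# $unroll_after bytes.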
if (!$i386 && $xmm) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
	&test	("ecx",1<<24);		# check for FXSR
	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&test	("edx",1<<29)		if ($shaext);	# check for SHA
	&jnz	(&label("shaext"))	if ($shaext);
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
	if ($xmm) {
		&je	(&label("AVX"))	if ($avx);
		&test	("ebx",1<<9);	# check for SSSE3
		&jnz	(&label("SSSE3"));
	} else {
		&je	(&label("loop_shrd"));
	}
	if ($unroll_after) {
&set_label("no_xmm");
		&sub	("eax","edi");
		&cmp	("eax",$unroll_after);
		&jae	(&label("unrolled"));
	}
}
	&jmp	(&label("loop"));

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
	# copy input block to stack reversing byte and dword order
	for($i=0;$i<4;$i++) {
		&mov	("eax",&DWP($i*16+0,"edi"));
		&mov	("ebx",&DWP($i*16+4,"edi"));
		&mov	("ecx",&DWP($i*16+8,"edi"));
		&bswap	("eax");
		&mov	("edx",&DWP($i*16+12,"edi"));
		&bswap	("ebx");
		&push	("eax");
		&bswap	("ecx");
		&push	("ebx");
		&bswap	("edx");
		&push	("ecx");
		&push	("edx");
	}
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));	# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	# &mov	($Aoff,$A);
	&mov	($Boff,"ebx");
	&xor	("ebx","ecx");
	&mov	($Coff,"ecx");
	&mov	($Doff,"edi");
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	# &mov	($Eoff,$E);
	&mov	($Foff,"ebx");
	&mov	($Goff,"ecx");
	&mov	($Hoff,"edi");

&set_label("00_15$suffix",16);
	&BODY_00_15();
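	# esi still holds the round constant just fetched by BODY_00_15;
	# comparing it against K256[15] (0xc19bf174) and K256[63]
	# (0xc67178f2) terminates the two round loops without a counter.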
	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));
	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);
	&BODY_16_63();
	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));

	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));	# ctx
	# &mov	($A,$Aoff);
	&mov	("ebx",$Boff);
	# &mov	("edi",$Coff);
	&mov	("ecx",$Doff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	# &mov	($E,$Eoff);
	&mov	("eax",$Foff);
	&mov	("ebx",$Goff);
	&mov	("ecx",$Hoff);
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));	# inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	&lea	("esp",&DWP(4*(9+16+64),"esp"));	# destroy frame
	&sub	($K256,4*64);				# rewind K

	&cmp	("edi",&DWP(8,"esp"));			# are we done yet?
	&jb	(&label("loop$suffix"));
}

&COMPACT_LOOP();
	&mov	("esp",&DWP(12,"esp"));	# restore sp
&function_end_A();

if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };

	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));	# restore sp
&function_end_A();
}

&set_label("K256",64);	# Yes! I keep it in the code segment!
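			# (Keeping the table in the code segment is what
			# lets the "pic_point" sequence above address it:
			# $K256 is derived from the popped return address,
			# so the data must sit at a constant offset from
			# the code itself.)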
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
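
# off() implements register rotation in memory: logical variable $k of
# round $i lives at esp+4*(($k-$i)&7), so simply incrementing $i renames
# a..h for the next round instead of moving eight values around.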

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));

&set_label("grand_loop",16);
	# copy input block to stack reversing byte order
	for($i=0;$i<5;$i++) {
		&mov	("ebx",&DWP(12*$i+0,"edi"));
		&mov	("ecx",&DWP(12*$i+4,"edi"));
		&bswap	("ebx");
		&mov	("esi",&DWP(12*$i+8,"edi"));
		&bswap	("ecx");
		&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
		&bswap	("esi");
		&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
		&mov	(&DWP(32+12*$i+8,"esp"),"esi");
	}
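	# (the five 3-dword strides above cover 60 bytes; the 16th and
	# last dword is loaded here, interleaved with the input-pointer
	# update)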
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");

my ($t1,$t2) = ("ecx","esi");

for ($i=0;$i<64;$i++) {

    if ($i>=16) {
	&mov	($T,$t1);			# $t1 is preloaded
	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
	&ror	($t1,18-7);
	&mov	("edi",$t2);
	&ror	($t2,19-17);
	&xor	($t1,$T);
	&shr	($T,3);
	&ror	($t1,7);
	&xor	($t2,"edi");
	&xor	($T,$t1);			# T = sigma0(X[-15])
	&ror	($t2,17);
	&add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
	&shr	("edi",10);
	&add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
	#&xor	("edi",$t2)			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
    }
	&mov	($t1,$E);
	&xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	&mov	($t2,&off($f));
	&ror	($E,25-11);
	&add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	&mov	("edi",&off($g));
	&xor	($E,$t1);
	&mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	&mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
	&xor	($t2,"edi");
	&ror	($E,11-6);
	&and	($t2,$t1);
	&mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&xor	($E,$t1);
	&add	($T,&off($h));		# T += h
	&xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	&mov	($t1,$AH[0]);
	&add	($T,"edi");		# T += Ch(e,f,g)

	&ror	($t1,22-13);
	&mov	($t2,$AH[0]);
	&mov	("edi",&off($b));
	&xor	($t1,$AH[0]);
	&mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	&xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&ror	($t1,13-2);
	&and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	&lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($t1,$t2);
	&xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)

	&add	($AH[1],$E);		# h += T
	&add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	&mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
}
	&mov	("esi",&DWP(96,"esp"));	# ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ebx");
	&mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}

if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
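
# (The three helpers above hand-assemble the SHA-NI instructions as raw
# 0F 38 CB/CC/CD opcode bytes plus a register-to-register ModR/M byte,
# so the script keeps working with assemblers that predate the
# mnemonics.)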

&set_label("shaext",32);
	&sub	("esp",32);

	&movdqu	($ABEF,&QWP(0,$ctx));		# DCBA
	&lea	($K256,&DWP(0x80,$K256));
	&movdqu	($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa	($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd	($Wi,$ABEF,0x1b);		# ABCD
	&pshufd	($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd	($CDGH,$CDGH,0x1b);		# EFGH
	&palignr ($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq ($CDGH,$Wi);		# CDGH
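	# (sha256rnds2 defines the state as two halves, ABEF and CDGH,
	# and takes the current pair of round constants plus message
	# words implicitly in the low qword of xmm0, i.e. $Wi; the
	# "pshufd $Wi,$Wi,0x0e" below moves the next pair into place.)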
	&jmp	(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$TMP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&movdqa	(&QWP(16,"esp"),$CDGH);		# offload

	&movdqa	($Wi,&QWP(0*16-0x80,$K256));
	&paddd	($Wi,@MSG[0]);
	&pshufb	(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 0-3
	&pshufd	($Wi,$Wi,0x0e);
	&nop	();
	&movdqa	(&QWP(0,"esp"),$ABEF);		# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(1*16-0x80,$K256));
	&paddd	($Wi,@MSG[1]);
	&pshufb	(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 4-7
	&pshufd	($Wi,$Wi,0x0e);
	&lea	($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(2*16-0x80,$K256));
	&paddd	($Wi,@MSG[2]);
	&pshufb	(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 8-11
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);
	&nop	();
	&paddd	(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(3*16-0x80,$K256));
	&paddd	($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);		# 12-15
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);
	&nop	();
	&paddd	(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);

for($i=4;$i<16-3;$i++) {
	&movdqa	($Wi,&QWP($i*16-0x80,$K256));
	&paddd	($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);		# 16-19...
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&nop	();
	&paddd	(@MSG[2],$TMP);
	&sha256msg1	(@MSG[3],@MSG[0]);
	&sha256rnds2	($ABEF,$CDGH);

	push(@MSG,shift(@MSG));
}
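	# (nine spins of the rotating pattern above cover rounds 16-51;
	# rounds 52-63 are peeled out below because the message schedule
	# stops being extended near the end of the block)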
	&movdqa	($Wi,&QWP(13*16-0x80,$K256));
	&paddd	($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);		# 52-55
	&pshufd	($Wi,$Wi,0x0e);
	&movdqa	($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd	(@MSG[2],$TMP);

	&movdqa	($Wi,&QWP(14*16-0x80,$K256));
	&paddd	($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);		# 56-59
	&pshufd	($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa	($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa	($Wi,&QWP(15*16-0x80,$K256));
	&paddd	($Wi,@MSG[2]);
	&nop	();
	&sha256rnds2	($CDGH,$ABEF);		# 60-63
	&pshufd	($Wi,$Wi,0x0e);
	&cmp	($end,$inp);
	&nop	();
	&sha256rnds2	($ABEF,$CDGH);

	&paddd	($CDGH,&QWP(16,"esp"));
	&paddd	($ABEF,&QWP(0,"esp"));
	&jnz	(&label("loop_shaext"));

	&pshufd	($CDGH,$CDGH,0xb1);		# DCHG
	&pshufd	($TMP,$ABEF,0x1b);		# FEBA
	&pshufd	($ABEF,$ABEF,0xb1);		# BAFE
	&punpckhqdq	($ABEF,$CDGH);		# DCBA
	&palignr	($CDGH,$TMP,8);		# HGFE

	&mov	("esp",&DWP(32+12,"esp"));
	&movdqu	(&QWP(0,$ctx),$ABEF);
	&movdqu	(&QWP(16,$ctx),$CDGH);
&function_end_A();
}

my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
	&add	($K256,64);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
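	# (each &$body call above contributes the ~30 instruction
	# strings of one scalar round, so @insns holds four rounds'
	# worth; the eval(shift(@insns)) calls below thread them
	# between the SIMD message-schedule ops so the integer and
	# vector pipes overlap)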
	  eval(shift(@insns));
	&movdqa	($t0,@X[1]);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa	($t3,@X[3]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&palignr ($t0,@X[0],4);			# X[1..4]
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&palignr ($t3,@X[2],4);			# X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa	($t1,$t0);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa	($t2,$t0);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld	($t0,3);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd	(@X[0],$t3);			# X[0..3] += X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld	($t2,7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&pshufd	($t3,@X[3],0b11111010);		# X[14..15]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld	($t1,32-18);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld	($t2,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld	($t1,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa	($t2,$t3);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t0,$t1);			# sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld	($t3,10);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd	(@X[0],$t0);			# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrlq	($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrlq	($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pshufd	($t3,$t3,0b10000000);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&psrldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd	(@X[0],$t3);			# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&pshufd	($t3,@X[0],0b01010000);		# X[16..17]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa	($t2,$t3);
	  eval(shift(@insns));			# @
	&psrld	($t3,10);
	  eval(shift(@insns));
	&psrlq	($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrlq	($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor	($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pshufd	($t3,$t3,0b00001000);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&movdqa	($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd	(@X[0],$t3);			# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd	($t2,@X[0]);
	  eval(shift(@insns));			# @

	foreach (@insns) { eval; }		# remaining instructions

	&movdqa	(&QWP(32+16*$j,"esp"),$t2);
}

sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	'&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	'&mov	("edi",&off($g));',
	'&xor	("esi","edi");',
	'&ror	($E,11-6);',
	'&and	("esi","ecx");',
	'&mov	(&off($e),"ecx");',			# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	'&xor	("edi","esi");',			# Ch(e,f,g)
	'&ror	($E,6);',				# T = Sigma1(e)
	'&mov	("ecx",$AH[0]);',
	'&add	($E,"edi");',				# T += Ch(e,f,g)
	'&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',
	'&ror	("ecx",22-13);',
	'&mov	(&off($a),$AH[0]);',			# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	'&xor	($AH[0],"edi");',			# a ^= b, (b^c) in next round
	'&add	($E,&off($h));',			# T += h
	'&ror	("ecx",13-2);',
	'&and	($AH[1],$AH[0]);',			# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	'&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi");',			# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',				# Sigma0(a)
	'&add	($AH[1],$E);',				# h += T
	'&add	($E,&off($d));',			# d += T
	'&add	($AH[1],"ecx");'.			# h += Sigma0(a)
	'@AH = reverse(@AH); $i++;'			# rotate(a,h)
	);
}

for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
}
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
}

	&mov	("esi",&DWP(96,"esp"));	# ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();

if ($avx) {
&set_label("AVX",32);
	if ($avx>1) {
	&and	("edx",1<<8|1<<3);	# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
	}
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd	($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd	($t1,@X[1],&QWP(16,$K256));
	&vpaddd	($t2,@X[2],&QWP(32,$K256));
	&vpaddd	($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_00_47"));

&set_label("avx_00_47",16);
	&add	($K256,64);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor	($t0,$t3,$t2);',
	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor	($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor	($t0,$t0,$t2);',
	'&vpsrld	($t2,$t3,10);',
	'&vpxor	($t0,$t0,$t1);',		# sigma0(X[1..4])
	'&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	'&vpxor	($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor	($t2,$t2,$t3);',		# sigma1(X[14..15])
	'&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&vpsrld	($t2,$t3,10);',
	'&vpsrlq	($t1,$t3,17);',
	'&vpxor	($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor	($t2,$t2,$t3);',		# sigma1(X[16..17])
	'&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}
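
# (The schedule above builds sigma0(x) = ROTR^7 ^ ROTR^18 ^ SHR^3 and
# sigma1(x) = ROTR^17 ^ ROTR^19 ^ SHR^10 out of shifts and xors, since
# neither SSE nor AVX offers a 32-bit vector rotate; e.g. ROTR^7 is
# composed as (x>>7)^(x<<25).)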

local *ror = sub { &shrd(@_[0],@_) };

sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd	($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}

for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
}
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
}

	&mov	("esi",&DWP(96,"esp"));	# ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();

if ($avx>1) {
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	'&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	'&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	'&and	($E,&off($f))',
	'&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&or	($E,"esi")',		# T = Ch(e,f,g)
	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	'&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	'&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)
	'&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h))',		# T += h
	'&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	'&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&add	("ecx",$E)',		# h += T
	'&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)
	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
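
# (The BMI variant gets its "+10%" mostly from rorx, which rotates into
# a separate destination without touching the flags, and from andn,
# which lets Ch(e,f,g) = (e&f)|(~e&g) be formed in three instructions.)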

&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd	($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd	($t1,@X[1],&QWP(16,$K256));
	&vpaddd	($t2,@X[2],&QWP(32,$K256));
	&vpaddd	($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
	&add	($K256,64);

for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
}
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
}

	&mov	("esi",&DWP(96,"esp"));	# ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
}
}
}}}
&function_end_B("sha256_block_data_order");

&asm_finish();