
#! /usr/bin/env perl
# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPLv2 terms is granted.
# ====================================================================
#
# SHA256/512 for ARMv8.
#
# Performance in cycles per processed byte and improvement coefficient
# over code generated with "default" compiler:
#
#               SHA256-hw   SHA256(*)      SHA512
# Apple A7      1.97        10.5 (+33%)    6.73 (-1%(**))
# Cortex-A53    2.38        15.5 (+115%)   10.0 (+150%(***))
# Cortex-A57    2.31        11.6 (+86%)    7.51 (+260%(***))
# Denver        2.01        10.5 (+26%)    6.70 (+8%)
# X-Gene                    20.0 (+100%)   12.8 (+300%(***))
# Mongoose      2.36        13.0 (+50%)    8.36 (+33%)
# Kryo          1.92        17.4 (+30%)    11.2 (+8%)
#
# (*)   Software SHA256 results are of lesser relevance, presented
#       mostly for informational purposes.
# (**)  The result is a trade-off: it's possible to improve it by
#       10% (or by 1 cycle per round), but at the cost of a 20% loss
#       on Cortex-A53 (or 4 cycles per round).
# (***) Super-impressive coefficients over gcc-generated code are an
#       indication of some compiler "pathology"; most notably, code
#       generated with -mgeneral-regs-only is significantly faster,
#       and the gap is only 40-90%.
#
# October 2016.
#
# Originally it was reckoned that it made no sense to implement a NEON
# version of SHA256 for 64-bit processors, because the performance
# improvement on the most widespread Cortex-A5x processors was observed
# to be marginal: essentially nil on Cortex-A53 and ~10% on Cortex-A57.
# But then it was observed that 32-bit NEON SHA256 performs
# significantly better than the 64-bit scalar version on *some* of the
# more recent processors. As a result, a 64-bit NEON version of SHA256
# was added to provide the best all-round performance; for example, it
# executes ~30% faster on X-Gene and Mongoose. [For reference, a NEON
# version of SHA512 is bound to deliver much less improvement, likely
# *negative* on Cortex-A5x, which is why NEON support is limited to
# SHA256.]

$output=pop;
$flavour=pop;

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open OUT,"| \"$^X\" $xlate $flavour $output";
    *STDOUT=*OUT;
} else {
    open STDOUT,">$output";
}
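
# The script takes two positional arguments, a perlasm "flavour" and an
# output file name, so a typical invocation (an assumption about the
# surrounding build system, not something this file dictates) would be:
#
#   perl sha512-armv8.pl linux64 sha512-armv8.S
#
# Any flavour other than "void" pipes the output through arm-xlate.pl.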
if ($output =~ /512/) {
    $BITS=512;
    $SZ=8;
    @Sigma0=(28,34,39);
    @Sigma1=(14,18,41);
    @sigma0=(1, 8, 7);
    @sigma1=(19,61, 6);
    $rounds=80;
    $reg_t="x";
} else {
    $BITS=256;
    $SZ=4;
    @Sigma0=( 2,13,22);
    @Sigma1=( 6,11,25);
    @sigma0=( 7,18, 3);
    @sigma1=(17,19,10);
    $rounds=64;
    $reg_t="w";
}
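
# The tuples above are the FIPS 180-4 rotation/shift amounts. For SHA-512:
#   Sigma0(x) = ROTR^28(x) ^ ROTR^34(x) ^ ROTR^39(x)
#   sigma0(x) = ROTR^1(x)  ^ ROTR^8(x)  ^ SHR^7(x)
# and analogously for Sigma1/sigma1; note that the third element of each
# lower-case tuple is a logical shift, not a rotate. The SHA-256 tuples
# follow the same pattern on 32-bit words.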
$func="sha${BITS}_block_data_order";

($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
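
# @X is a 16-entry ring buffer holding the live window of the message
# schedule W[t]; @V holds the eight working variables a..h. Rather than
# shuffling data between registers, each round logically renames them
# with unshift(@V,pop(@V)) below, so one code template serves every round.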
sub BODY_00_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
my $j=($i+1)&15;
my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
   $T0=@X[$i+3] if ($i<11);

$code.=<<___ if ($i<16);
#ifndef __AARCH64EB__
        rev     @X[$i],@X[$i]                   // $i
#endif
___
$code.=<<___ if ($i<13 && ($i&1));
        ldp     @X[$i+1],@X[$i+2],[$inp],#2*$SZ
___
$code.=<<___ if ($i==13);
        ldp     @X[14],@X[15],[$inp]
___
$code.=<<___ if ($i>=14);
        ldr     @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
___
$code.=<<___ if ($i>0 && $i<16);
        add     $a,$a,$t1                       // h+=Sigma0(a)
___
$code.=<<___ if ($i>=11);
        str     @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
___
# While ARMv8 specifies merged rotate-and-logical operations such as
# 'eor x,y,z,ror#n', they were found to negatively affect performance
# on Apple A7. The reason seems to be that they require even 'y' to
# be available earlier. This means that such a merged instruction is
# not necessarily the best choice on the critical path... On the other
# hand, Cortex-A5x handles merged instructions much better than
# disjoint rotate and logical... See the (**) footnote above.
$code.=<<___ if ($i<15);
        ror     $t0,$e,#$Sigma1[0]
        add     $h,$h,$t2                       // h+=K[i]
        eor     $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
        and     $t1,$f,$e
        bic     $t2,$g,$e
        add     $h,$h,@X[$i&15]                 // h+=X[i]
        orr     $t1,$t1,$t2                     // Ch(e,f,g)
        eor     $t2,$a,$b                       // a^b, b^c in next round
        eor     $t0,$t0,$T0,ror#$Sigma1[1]      // Sigma1(e)
        ror     $T0,$a,#$Sigma0[0]
        add     $h,$h,$t1                       // h+=Ch(e,f,g)
        eor     $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
        add     $h,$h,$t0                       // h+=Sigma1(e)
        and     $t3,$t3,$t2                     // (b^c)&=(a^b)
        add     $d,$d,$h                        // d+=h
        eor     $t3,$t3,$b                      // Maj(a,b,c)
        eor     $t1,$T0,$t1,ror#$Sigma0[1]      // Sigma0(a)
        add     $h,$h,$t3                       // h+=Maj(a,b,c)
        ldr     $t3,[$Ktbl],#$SZ                // *K++, $t2 in next round
        //add   $h,$h,$t1                       // h+=Sigma0(a)
___
$code.=<<___ if ($i>=15);
        ror     $t0,$e,#$Sigma1[0]
        add     $h,$h,$t2                       // h+=K[i]
        ror     $T1,@X[($j+1)&15],#$sigma0[0]
        and     $t1,$f,$e
        ror     $T2,@X[($j+14)&15],#$sigma1[0]
        bic     $t2,$g,$e
        ror     $T0,$a,#$Sigma0[0]
        add     $h,$h,@X[$i&15]                 // h+=X[i]
        eor     $t0,$t0,$e,ror#$Sigma1[1]
        eor     $T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
        orr     $t1,$t1,$t2                     // Ch(e,f,g)
        eor     $t2,$a,$b                       // a^b, b^c in next round
        eor     $t0,$t0,$e,ror#$Sigma1[2]       // Sigma1(e)
        eor     $T0,$T0,$a,ror#$Sigma0[1]
        add     $h,$h,$t1                       // h+=Ch(e,f,g)
        and     $t3,$t3,$t2                     // (b^c)&=(a^b)
        eor     $T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
        eor     $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]    // sigma0(X[i+1])
        add     $h,$h,$t0                       // h+=Sigma1(e)
        eor     $t3,$t3,$b                      // Maj(a,b,c)
        eor     $t1,$T0,$a,ror#$Sigma0[2]       // Sigma0(a)
        eor     $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]   // sigma1(X[i+14])
        add     @X[$j],@X[$j],@X[($j+9)&15]
        add     $d,$d,$h                        // d+=h
        add     $h,$h,$t3                       // h+=Maj(a,b,c)
        ldr     $t3,[$Ktbl],#$SZ                // *K++, $t2 in next round
        add     @X[$j],@X[$j],$T1
        add     $h,$h,$t1                       // h+=Sigma0(a)
        add     @X[$j],@X[$j],$T2
___
    ($t2,$t3)=($t3,$t2);
}
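
# Note the ($t2,$t3) swap at the end of each round: one scratch register
# carries K[i] (loaded one round ahead by the 'ldr ...,[$Ktbl],#$SZ'
# above), while the other carries the b^c value reused by Maj(), and the
# two roles alternate from round to round, as the "$t2 in next round"
# comments indicate.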
$code.=<<___;
#ifndef __KERNEL__
# include "arm_arch.h"
#endif

.text

.extern OPENSSL_armcap_P
.hidden OPENSSL_armcap_P
.globl  $func
.type   $func,%function
.align  6
$func:
#ifndef __KERNEL__
# ifdef __ILP32__
        ldrsw   x16,.LOPENSSL_armcap_P
# else
        ldr     x16,.LOPENSSL_armcap_P
# endif
        adr     x17,.LOPENSSL_armcap_P
        add     x16,x16,x17
        ldr     w16,[x16]
___
$code.=<<___ if ($SZ==4);
        tst     w16,#ARMV8_SHA256
        b.ne    .Lv8_entry
        tst     w16,#ARMV7_NEON
        b.ne    .Lneon_entry
___
$code.=<<___ if ($SZ==8);
        tst     w16,#ARMV8_SHA512
        b.ne    .Lv8_entry
___
$code.=<<___;
#endif
        .inst   0xd503233f                      // paciasp
        stp     x29,x30,[sp,#-128]!
        add     x29,sp,#0

        stp     x19,x20,[sp,#16]
        stp     x21,x22,[sp,#32]
        stp     x23,x24,[sp,#48]
        stp     x25,x26,[sp,#64]
        stp     x27,x28,[sp,#80]
        sub     sp,sp,#4*$SZ

        ldp     $A,$B,[$ctx]                    // load context
        ldp     $C,$D,[$ctx,#2*$SZ]
        ldp     $E,$F,[$ctx,#4*$SZ]
        add     $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input
        ldp     $G,$H,[$ctx,#6*$SZ]
        adr     $Ktbl,.LK$BITS
        stp     $ctx,$num,[x29,#96]

.Loop:
        ldp     @X[0],@X[1],[$inp],#2*$SZ
        ldr     $t2,[$Ktbl],#$SZ                // *K++
        eor     $t3,$B,$C                       // magic seed
        str     $inp,[x29,#112]
___
for ($i=0;$i<16;$i++)   { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=".Loop_16_xx:\n";
for (;$i<32;$i++)       { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
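
# The first sixteen rounds above are fully unrolled; the .Loop_16_xx block
# is then re-entered until the K value just loaded into $t2 is the zero
# word appended to .LK$BITS as a terminator, which doubles as the round
# counter (see the 'cbnz $t2,.Loop_16_xx' below).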
$code.=<<___;
        cbnz    $t2,.Loop_16_xx

        ldp     $ctx,$num,[x29,#96]
        ldr     $inp,[x29,#112]
        sub     $Ktbl,$Ktbl,#`$SZ*($rounds+1)`  // rewind

        ldp     @X[0],@X[1],[$ctx]
        ldp     @X[2],@X[3],[$ctx,#2*$SZ]
        add     $inp,$inp,#14*$SZ               // advance input pointer
        ldp     @X[4],@X[5],[$ctx,#4*$SZ]
        add     $A,$A,@X[0]
        ldp     @X[6],@X[7],[$ctx,#6*$SZ]
        add     $B,$B,@X[1]
        add     $C,$C,@X[2]
        add     $D,$D,@X[3]
        stp     $A,$B,[$ctx]
        add     $E,$E,@X[4]
        add     $F,$F,@X[5]
        stp     $C,$D,[$ctx,#2*$SZ]
        add     $G,$G,@X[6]
        add     $H,$H,@X[7]
        cmp     $inp,$num
        stp     $E,$F,[$ctx,#4*$SZ]
        stp     $G,$H,[$ctx,#6*$SZ]
        b.ne    .Loop

        ldp     x19,x20,[x29,#16]
        add     sp,sp,#4*$SZ
        ldp     x21,x22,[x29,#32]
        ldp     x23,x24,[x29,#48]
        ldp     x25,x26,[x29,#64]
        ldp     x27,x28,[x29,#80]
        ldp     x29,x30,[sp],#128
        .inst   0xd50323bf                      // autiasp
        ret
.size   $func,.-$func

.align  6
.type   .LK$BITS,%object
.LK$BITS:
___
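
# Round constants straight from FIPS 180-4: the leading fractional bits of
# the cube roots of the first 64 (SHA-256) or 80 (SHA-512) primes. The
# extra zero word at the end is the loop terminator tested by the
# 'cbnz $t2,.Loop_16_xx' above.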
$code.=<<___ if ($SZ==8);
        .quad   0x428a2f98d728ae22,0x7137449123ef65cd
        .quad   0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
        .quad   0x3956c25bf348b538,0x59f111f1b605d019
        .quad   0x923f82a4af194f9b,0xab1c5ed5da6d8118
        .quad   0xd807aa98a3030242,0x12835b0145706fbe
        .quad   0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
        .quad   0x72be5d74f27b896f,0x80deb1fe3b1696b1
        .quad   0x9bdc06a725c71235,0xc19bf174cf692694
        .quad   0xe49b69c19ef14ad2,0xefbe4786384f25e3
        .quad   0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
        .quad   0x2de92c6f592b0275,0x4a7484aa6ea6e483
        .quad   0x5cb0a9dcbd41fbd4,0x76f988da831153b5
        .quad   0x983e5152ee66dfab,0xa831c66d2db43210
        .quad   0xb00327c898fb213f,0xbf597fc7beef0ee4
        .quad   0xc6e00bf33da88fc2,0xd5a79147930aa725
        .quad   0x06ca6351e003826f,0x142929670a0e6e70
        .quad   0x27b70a8546d22ffc,0x2e1b21385c26c926
        .quad   0x4d2c6dfc5ac42aed,0x53380d139d95b3df
        .quad   0x650a73548baf63de,0x766a0abb3c77b2a8
        .quad   0x81c2c92e47edaee6,0x92722c851482353b
        .quad   0xa2bfe8a14cf10364,0xa81a664bbc423001
        .quad   0xc24b8b70d0f89791,0xc76c51a30654be30
        .quad   0xd192e819d6ef5218,0xd69906245565a910
        .quad   0xf40e35855771202a,0x106aa07032bbd1b8
        .quad   0x19a4c116b8d2d0c8,0x1e376c085141ab53
        .quad   0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
        .quad   0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
        .quad   0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
        .quad   0x748f82ee5defb2fc,0x78a5636f43172f60
        .quad   0x84c87814a1f0ab72,0x8cc702081a6439ec
        .quad   0x90befffa23631e28,0xa4506cebde82bde9
        .quad   0xbef9a3f7b2c67915,0xc67178f2e372532b
        .quad   0xca273eceea26619c,0xd186b8c721c0c207
        .quad   0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        .quad   0x06f067aa72176fba,0x0a637dc5a2c898a6
        .quad   0x113f9804bef90dae,0x1b710b35131c471b
        .quad   0x28db77f523047d84,0x32caab7b40c72493
        .quad   0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
        .quad   0x4cc5d4becb3e42b6,0x597f299cfc657e2a
        .quad   0x5fcb6fab3ad6faec,0x6c44198c4a475817
        .quad   0       // terminator
___
$code.=<<___ if ($SZ==4);
        .long   0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
        .long   0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
        .long   0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
        .long   0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
        .long   0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
        .long   0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
        .long   0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
        .long   0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
        .long   0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
        .long   0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
        .long   0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
        .long   0xd192e819,0xd6990624,0xf40e3585,0x106aa070
        .long   0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
        .long   0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
        .long   0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
        .long   0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
        .long   0       // terminator
___
$code.=<<___;
.size   .LK$BITS,.-.LK$BITS

#ifndef __KERNEL__
.align  3
.LOPENSSL_armcap_P:
# ifdef __ILP32__
        .long   OPENSSL_armcap_P-.
# else
        .quad   OPENSSL_armcap_P-.
# endif
#endif
.asciz  "SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
___
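
# The sha256_block_armv8 path below relies on the ARMv8 Cryptography
# Extensions: sha256h/sha256h2 each digest one 4-word W+K chunk into the
# two state halves, while sha256su0/sha256su1 compute the next four
# message-schedule words.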
if ($SZ==4) {
my $Ktbl="x3";

my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
my @MSG=map("v$_.16b",(4..7));
my ($W0,$W1)=("v16.4s","v17.4s");
my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");

$code.=<<___;
#ifndef __KERNEL__
.type   sha256_block_armv8,%function
.align  6
sha256_block_armv8:
.Lv8_entry:
        stp     x29,x30,[sp,#-16]!
        add     x29,sp,#0

        ld1.32  {$ABCD,$EFGH},[$ctx]
        adr     $Ktbl,.LK256

.Loop_hw:
        ld1     {@MSG[0]-@MSG[3]},[$inp],#64
        sub     $num,$num,#1
        ld1.32  {$W0},[$Ktbl],#16
        rev32   @MSG[0],@MSG[0]
        rev32   @MSG[1],@MSG[1]
        rev32   @MSG[2],@MSG[2]
        rev32   @MSG[3],@MSG[3]
        orr     $ABCD_SAVE,$ABCD,$ABCD          // offload
        orr     $EFGH_SAVE,$EFGH,$EFGH
___
for($i=0;$i<12;$i++) {
$code.=<<___;
        ld1.32  {$W1},[$Ktbl],#16
        add.i32 $W0,$W0,@MSG[0]
        sha256su0       @MSG[0],@MSG[1]
        orr     $abcd,$ABCD,$ABCD
        sha256h $ABCD,$EFGH,$W0
        sha256h2        $EFGH,$abcd,$W0
        sha256su1       @MSG[0],@MSG[2],@MSG[3]
___
        ($W0,$W1)=($W1,$W0);    push(@MSG,shift(@MSG));
}
$code.=<<___;
        ld1.32  {$W1},[$Ktbl],#16
        add.i32 $W0,$W0,@MSG[0]
        orr     $abcd,$ABCD,$ABCD
        sha256h $ABCD,$EFGH,$W0
        sha256h2        $EFGH,$abcd,$W0

        ld1.32  {$W0},[$Ktbl],#16
        add.i32 $W1,$W1,@MSG[1]
        orr     $abcd,$ABCD,$ABCD
        sha256h $ABCD,$EFGH,$W1
        sha256h2        $EFGH,$abcd,$W1

        ld1.32  {$W1},[$Ktbl]
        add.i32 $W0,$W0,@MSG[2]
        sub     $Ktbl,$Ktbl,#$rounds*$SZ-16     // rewind
        orr     $abcd,$ABCD,$ABCD
        sha256h $ABCD,$EFGH,$W0
        sha256h2        $EFGH,$abcd,$W0

        add.i32 $W1,$W1,@MSG[3]
        orr     $abcd,$ABCD,$ABCD
        sha256h $ABCD,$EFGH,$W1
        sha256h2        $EFGH,$abcd,$W1

        add.i32 $ABCD,$ABCD,$ABCD_SAVE
        add.i32 $EFGH,$EFGH,$EFGH_SAVE

        cbnz    $num,.Loop_hw

        st1.32  {$ABCD,$EFGH},[$ctx]

        ldr     x29,[sp],#16
        ret
.size   sha256_block_armv8,.-sha256_block_armv8
#endif
___
}
if ($SZ==4) {   ######################################### NEON stuff #
# You'll surely note a lot of similarities with the sha256-armv4 module,
# and of course that's no coincidence. sha256-armv4 was used as the
# initial template, but was adapted to the ARMv8 instruction set and
# extensively re-tuned for all-round performance.

my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10));
my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15));
my $Ktbl="x16";
my $Xfer="x17";
my @X = map("q$_",(0..3));
my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
my $j=0;

sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}
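
# AUTOLOAD turns any undefined sub call into an emitted instruction, with
# underscores mapped to size suffixes; e.g. &ushr_32($T2,$T0,7) appends
# "ushr.32 q6,q4,#7" to $code, which the post-processing loop at the end
# of this file then rewrites into the official "ushr v6.4s,v4.4s,#7" form.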
sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; }
sub Dlo     { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; }
sub Dhi     { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; }

sub Xupdate()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e,$f,$g,$h);

        &ext_8          ($T0,@X[0],@X[1],4);    # X[1..4]
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &ext_8          ($T3,@X[2],@X[3],4);    # X[9..12]
         eval(shift(@insns));
         eval(shift(@insns));
        &mov            (&Dscalar($T7),&Dhi(@X[3]));    # X[14..15]
         eval(shift(@insns));
         eval(shift(@insns));
        &ushr_32        ($T2,$T0,$sigma0[0]);
         eval(shift(@insns));
        &ushr_32        ($T1,$T0,$sigma0[2]);
         eval(shift(@insns));
        &add_32         (@X[0],@X[0],$T3);      # X[0..3] += X[9..12]
         eval(shift(@insns));
        &sli_32         ($T2,$T0,32-$sigma0[0]);
         eval(shift(@insns));
         eval(shift(@insns));
        &ushr_32        ($T3,$T0,$sigma0[1]);
         eval(shift(@insns));
         eval(shift(@insns));
        &eor_8          ($T1,$T1,$T2);
         eval(shift(@insns));
         eval(shift(@insns));
        &sli_32         ($T3,$T0,32-$sigma0[1]);
         eval(shift(@insns));
         eval(shift(@insns));
        &ushr_32        ($T4,$T7,$sigma1[0]);
         eval(shift(@insns));
         eval(shift(@insns));
        &eor_8          ($T1,$T1,$T3);          # sigma0(X[1..4])
         eval(shift(@insns));
         eval(shift(@insns));
        &sli_32         ($T4,$T7,32-$sigma1[0]);
         eval(shift(@insns));
         eval(shift(@insns));
        &ushr_32        ($T5,$T7,$sigma1[2]);
         eval(shift(@insns));
         eval(shift(@insns));
        &ushr_32        ($T3,$T7,$sigma1[1]);
         eval(shift(@insns));
         eval(shift(@insns));
        &add_32         (@X[0],@X[0],$T1);      # X[0..3] += sigma0(X[1..4])
         eval(shift(@insns));
         eval(shift(@insns));
        &sli_u32        ($T3,$T7,32-$sigma1[1]);
         eval(shift(@insns));
         eval(shift(@insns));
        &eor_8          ($T5,$T5,$T4);
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &eor_8          ($T5,$T5,$T3);          # sigma1(X[14..15])
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &add_32         (@X[0],@X[0],$T5);      # X[0..1] += sigma1(X[14..15])
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &ushr_32        ($T6,@X[0],$sigma1[0]);
         eval(shift(@insns));
        &ushr_32        ($T7,@X[0],$sigma1[2]);
         eval(shift(@insns));
         eval(shift(@insns));
        &sli_32         ($T6,@X[0],32-$sigma1[0]);
         eval(shift(@insns));
        &ushr_32        ($T5,@X[0],$sigma1[1]);
         eval(shift(@insns));
         eval(shift(@insns));
        &eor_8          ($T7,$T7,$T6);
         eval(shift(@insns));
         eval(shift(@insns));
        &sli_32         ($T5,@X[0],32-$sigma1[1]);
         eval(shift(@insns));
         eval(shift(@insns));
        &ld1_32         ("{$T0}","[$Ktbl], #16");
         eval(shift(@insns));
        &eor_8          ($T7,$T7,$T5);          # sigma1(X[16..17])
         eval(shift(@insns));
         eval(shift(@insns));
        &eor_8          ($T5,$T5,$T5);
         eval(shift(@insns));
         eval(shift(@insns));
        &mov            (&Dhi($T5), &Dlo($T7));
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &add_32         (@X[0],@X[0],$T5);      # X[2..3] += sigma1(X[16..17])
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &add_32         ($T0,$T0,@X[0]);
         while($#insns>=1) { eval(shift(@insns)); }
        &st1_32         ("{$T0}","[$Xfer], #16");
         eval(shift(@insns));

        push(@X,shift(@X));             # "rotate" X[]
}
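
# Xupdate emits the NEON computation of four fresh schedule words (the
# sigma0/sigma1 rotates built from ushr+sli pairs) and, via the
# eval(shift(@insns)) calls, interleaves it with four rounds' worth of
# scalar round-body fragments so the integer and vector pipes run
# concurrently; the final st1 stashes W+K on the stack for the scalar
# code to consume.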
sub Xpreload()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e,$f,$g,$h);

         eval(shift(@insns));
         eval(shift(@insns));
        &ld1_8          ("{@X[0]}","[$inp],#16");
         eval(shift(@insns));
         eval(shift(@insns));
        &ld1_32         ("{$T0}","[$Ktbl],#16");
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &rev32          (@X[0],@X[0]);
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
         eval(shift(@insns));
        &add_32         ($T0,$T0,@X[0]);
         foreach (@insns) { eval; }     # remaining instructions
        &st1_32         ("{$T0}","[$Xfer], #16");

        push(@X,shift(@X));             # "rotate" X[]
}

sub body_00_15 () {
        (
        '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
        '&add   ($h,$h,$t1)',                   # h+=X[i]+K[i]
        '&add   ($a,$a,$t4);'.                  # h+=Sigma0(a) from the past
        '&and   ($t1,$f,$e)',
        '&bic   ($t4,$g,$e)',
        '&eor   ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
        '&add   ($a,$a,$t2)',                   # h+=Maj(a,b,c) from the past
        '&orr   ($t1,$t1,$t4)',                 # Ch(e,f,g)
        '&eor   ($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',   # Sigma1(e)
        '&eor   ($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
        '&add   ($h,$h,$t1)',                   # h+=Ch(e,f,g)
        '&ror   ($t0,$t0,"#$Sigma1[0]")',
        '&eor   ($t2,$a,$b)',                   # a^b, b^c in next round
        '&eor   ($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',   # Sigma0(a)
        '&add   ($h,$h,$t0)',                   # h+=Sigma1(e)
        '&ldr   ($t1,sprintf "[sp,#%d]",4*(($j+1)&15))  if (($j&15)!=15);'.
        '&ldr   ($t1,"[$Ktbl]")                         if ($j==15);'.
        '&and   ($t3,$t3,$t2)',                 # (b^c)&=(a^b)
        '&ror   ($t4,$t4,"#$Sigma0[0]")',
        '&add   ($d,$d,$h)',                    # d+=h
        '&eor   ($t3,$t3,$b)',                  # Maj(a,b,c)
        '$j++;  unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
        )
}
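
# "From the past" above means that the h+=Sigma0(a) and h+=Maj(a,b,c)
# additions of one round are deferred into the next round (as adds into
# what is by then register a), shortening the critical path. The final
# pass therefore has to flush the two outstanding additions, which is
# what the 'add $A,$A,$t4' / 'add $A,$A,$t2' pair after the last
# Xpreload below does.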
$code.=<<___;
#ifdef  __KERNEL__
.globl  sha256_block_neon
#endif
.type   sha256_block_neon,%function
.align  4
sha256_block_neon:
.Lneon_entry:
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp
        sub     sp,sp,#16*4

        adr     $Ktbl,.LK256
        add     $num,$inp,$num,lsl#6    // len to point at the end of inp

        ld1.8   {@X[0]},[$inp], #16
        ld1.8   {@X[1]},[$inp], #16
        ld1.8   {@X[2]},[$inp], #16
        ld1.8   {@X[3]},[$inp], #16
        ld1.32  {$T0},[$Ktbl], #16
        ld1.32  {$T1},[$Ktbl], #16
        ld1.32  {$T2},[$Ktbl], #16
        ld1.32  {$T3},[$Ktbl], #16
        rev32   @X[0],@X[0]             // yes, even on
        rev32   @X[1],@X[1]             // big-endian
        rev32   @X[2],@X[2]
        rev32   @X[3],@X[3]
        mov     $Xfer,sp
        add.32  $T0,$T0,@X[0]
        add.32  $T1,$T1,@X[1]
        add.32  $T2,$T2,@X[2]
        st1.32  {$T0-$T1},[$Xfer], #32
        add.32  $T3,$T3,@X[3]
        st1.32  {$T2-$T3},[$Xfer]
        sub     $Xfer,$Xfer,#32

        ldp     $A,$B,[$ctx]
        ldp     $C,$D,[$ctx,#8]
        ldp     $E,$F,[$ctx,#16]
        ldp     $G,$H,[$ctx,#24]
        ldr     $t1,[sp,#0]
        mov     $t2,wzr
        eor     $t3,$B,$C
        mov     $t4,wzr
        b       .L_00_48

.align  4
.L_00_48:
___
&Xupdate(\&body_00_15);
&Xupdate(\&body_00_15);
&Xupdate(\&body_00_15);
&Xupdate(\&body_00_15);
$code.=<<___;
        cmp     $t1,#0                  // check for K256 terminator
        ldr     $t1,[sp,#0]
        sub     $Xfer,$Xfer,#64
        bne     .L_00_48

        sub     $Ktbl,$Ktbl,#256        // rewind $Ktbl
        cmp     $inp,$num
        mov     $Xfer, #64
        csel    $Xfer, $Xfer, xzr, eq
        sub     $inp,$inp,$Xfer         // avoid SEGV
        mov     $Xfer,sp
___
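
# Rewind trick: on the last block ($inp==$num) the input pointer is pulled
# back by 64 bytes, so the Xpreload passes below re-read the final block
# instead of touching memory past the end of the input (the "avoid SEGV"
# comment above). The redundant schedule is computed and stored, but the
# b.ne at the bottom falls through before it would be consumed.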
&Xpreload(\&body_00_15);
&Xpreload(\&body_00_15);
&Xpreload(\&body_00_15);
&Xpreload(\&body_00_15);
$code.=<<___;
        add     $A,$A,$t4               // h+=Sigma0(a) from the past
        ldp     $t0,$t1,[$ctx,#0]
        add     $A,$A,$t2               // h+=Maj(a,b,c) from the past
        ldp     $t2,$t3,[$ctx,#8]
        add     $A,$A,$t0               // accumulate
        add     $B,$B,$t1
        ldp     $t0,$t1,[$ctx,#16]
        add     $C,$C,$t2
        add     $D,$D,$t3
        ldp     $t2,$t3,[$ctx,#24]
        add     $E,$E,$t0
        add     $F,$F,$t1
        ldr     $t1,[sp,#0]
        stp     $A,$B,[$ctx,#0]
        add     $G,$G,$t2
        mov     $t2,wzr
        stp     $C,$D,[$ctx,#8]
        add     $H,$H,$t3
        stp     $E,$F,[$ctx,#16]
        eor     $t3,$B,$C
        stp     $G,$H,[$ctx,#24]
        mov     $t4,wzr
        mov     $Xfer,sp
        b.ne    .L_00_48

        ldr     x29,[x29]
        add     sp,sp,#16*4+16
        ret
.size   sha256_block_neon,.-sha256_block_neon
___
}
if ($SZ==8) {
my $Ktbl="x3";

my @H = map("v$_.16b",(0..4));
my ($fg,$de,$m9_10)=map("v$_.16b",(5..7));
my @MSG=map("v$_.16b",(16..23));
my ($W0,$W1)=("v24.2d","v25.2d");
my ($AB,$CD,$EF,$GH)=map("v$_.16b",(26..29));

$code.=<<___;
#ifndef __KERNEL__
.type   sha512_block_armv8,%function
.align  6
sha512_block_armv8:
.Lv8_entry:
        stp     x29,x30,[sp,#-16]!
        add     x29,sp,#0

        ld1     {@MSG[0]-@MSG[3]},[$inp],#64    // load input
        ld1     {@MSG[4]-@MSG[7]},[$inp],#64

        ld1.64  {@H[0]-@H[3]},[$ctx]            // load context
        adr     $Ktbl,.LK512

        rev64   @MSG[0],@MSG[0]
        rev64   @MSG[1],@MSG[1]
        rev64   @MSG[2],@MSG[2]
        rev64   @MSG[3],@MSG[3]
        rev64   @MSG[4],@MSG[4]
        rev64   @MSG[5],@MSG[5]
        rev64   @MSG[6],@MSG[6]
        rev64   @MSG[7],@MSG[7]
        b       .Loop_hw

.align  4
.Loop_hw:
        ld1.64  {$W0},[$Ktbl],#16
        subs    $num,$num,#1
        sub     x4,$inp,#128
        orr     $AB,@H[0],@H[0]                 // offload
        orr     $CD,@H[1],@H[1]
        orr     $EF,@H[2],@H[2]
        orr     $GH,@H[3],@H[3]
        csel    $inp,$inp,x4,ne                 // conditional rewind
___
for($i=0;$i<32;$i++) {
$code.=<<___;
        add.i64 $W0,$W0,@MSG[0]
        ld1.64  {$W1},[$Ktbl],#16
        ext     $W0,$W0,$W0,#8
        ext     $fg,@H[2],@H[3],#8
        ext     $de,@H[1],@H[2],#8
        add.i64 @H[3],@H[3],$W0                 // "T1 + H + K512[i]"
        sha512su0       @MSG[0],@MSG[1]
        ext     $m9_10,@MSG[4],@MSG[5],#8
        sha512h @H[3],$fg,$de
        sha512su1       @MSG[0],@MSG[7],$m9_10
        add.i64 @H[4],@H[1],@H[3]               // "D + T1"
        sha512h2        @H[3],$H[1],@H[0]
___
        ($W0,$W1)=($W1,$W0);    push(@MSG,shift(@MSG));
        @H = (@H[3],@H[0],@H[4],@H[2],@H[1]);
}
for(;$i<40;$i++) {
$code.=<<___ if ($i<39);
        ld1.64  {$W1},[$Ktbl],#16
___
$code.=<<___ if ($i==39);
        sub     $Ktbl,$Ktbl,#$rounds*$SZ        // rewind
___
$code.=<<___;
        add.i64 $W0,$W0,@MSG[0]
        ld1     {@MSG[0]},[$inp],#16            // load next input
        ext     $W0,$W0,$W0,#8
        ext     $fg,@H[2],@H[3],#8
        ext     $de,@H[1],@H[2],#8
        add.i64 @H[3],@H[3],$W0                 // "T1 + H + K512[i]"
        sha512h @H[3],$fg,$de
        rev64   @MSG[0],@MSG[0]
        add.i64 @H[4],@H[1],@H[3]               // "D + T1"
        sha512h2        @H[3],$H[1],@H[0]
___
        ($W0,$W1)=($W1,$W0);    push(@MSG,shift(@MSG));
        @H = (@H[3],@H[0],@H[4],@H[2],@H[1]);
}
$code.=<<___;
        add.i64 @H[0],@H[0],$AB                 // accumulate
        add.i64 @H[1],@H[1],$CD
        add.i64 @H[2],@H[2],$EF
        add.i64 @H[3],@H[3],$GH

        cbnz    $num,.Loop_hw

        st1.64  {@H[0]-@H[3]},[$ctx]            // store context

        ldr     x29,[sp],#16
        ret
.size   sha512_block_armv8,.-sha512_block_armv8
#endif
___
}
{   my %opcode = (
        "sha256h"       => 0x5e004000,  "sha256h2"      => 0x5e005000,
        "sha256su0"     => 0x5e282800,  "sha256su1"     => 0x5e006000  );

    sub unsha256 {
        my ($mnemonic,$arg)=@_;

        $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
        &&
        sprintf ".inst\t0x%08x\t//%s %s",
                        $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
                        $mnemonic,$arg;
    }
}

{   my %opcode = (
        "sha512h"       => 0xce608000,  "sha512h2"      => 0xce608400,
        "sha512su0"     => 0xcec08000,  "sha512su1"     => 0xce608800  );

    sub unsha512 {
        my ($mnemonic,$arg)=@_;

        $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
        &&
        sprintf ".inst\t0x%08x\t//%s %s",
                        $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
                        $mnemonic,$arg;
    }
}
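
# unsha256/unsha512 hand-encode the crypto instructions so the output
# assembles even with toolchains that predate the SHA extensions: the
# base opcode is OR'ed with Rd, Rn<<5 and Rm<<16. For example (my
# arithmetic from the table above, not from the source), "sha512h
# q3,q5,q6" becomes ".inst 0xce6680a3".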
open SELF,$0;
while(<SELF>) {
        next if (/^#!/);                        # skip shebang
        last if (!s/^#/\/\// and !/^$/);        # copy leading comments as //
        print;
}
close SELF;

# Post-process $code: evaluate `...` expressions, encode crypto
# instructions, and translate the abbreviated perlasm syntax into
# official ARMv8 assembly.
foreach(split("\n",$code)) {
        s/\`([^\`]*)\`/eval($1)/ge;

        s/\b(sha512\w+)\s+([qv].*)/unsha512($1,$2)/ge   or
        s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge;

        s/\bq([0-9]+)\b/v$1.16b/g;              # old->new registers

        s/\.[ui]?8(\s)/$1/;
        s/\.\w?64\b//   and s/\.16b/\.2d/g      or
        s/\.\w?32\b//   and s/\.16b/\.4s/g;
        m/\bext\b/      and s/\.2d/\.16b/g      or
        m/(ld|st)1[^\[]+\[0\]/  and s/\.4s/\.s/g;

        print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";