#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# March 2010
#
# The module implements the "4-bit" GCM GHASH function and underlying
# single multiplication operation in GF(2^128). "4-bit" means that it
# uses a 256-byte per-key table [+128-byte shared table]. Performance
# results are for the streamed GHASH subroutine on UltraSPARC pre-Tx
# CPU and are expressed in cycles per processed byte, less is better:
#
#		gcc 3.3.x	cc 5.2		this assembler
#
# 32-bit build	81.4		43.3		12.6	(+546%/+244%)
# 64-bit build	20.2		21.2		12.6	(+60%/+68%)
#
# Here is data collected on an UltraSPARC T1 system running Linux:
#
#		gcc 4.4.1	this assembler
#
# 32-bit build	566		50	(+1000%)
# 64-bit build	56		50	(+12%)
#
# I don't quite understand why the difference between 32-bit and
# 64-bit compiler-generated code is so big. Compilers *were* instructed
# to generate code for UltraSPARC and should have used 64-bit registers
# for the Z vector (see C code) even in the 32-bit build... Oh well, it
# only means more impressive improvement coefficients for this
# assembler module;-) Loops are aggressively modulo-scheduled with
# respect to references to input data and Z.hi updates to achieve
# 12-cycle timing. To anchor to something else, sha1-sparcv9.pl spends
# 11.6 cycles to process one byte on UltraSPARC pre-Tx CPU and ~24 on T1.
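#
# For reference, here is a minimal C sketch of the "4-bit" algorithm
# being scheduled, modeled on the generic gcm128.c code path; u8/u64
# are the obvious fixed-width types, the Htable layout [.hi at +0,
# .lo at +8] and rem_4bit correspond to the tables used below, and the
# function name is purely illustrative. rem_4bit[i] holds the 16-bit
# reduction constant for nibble i, pre-shifted into the uppermost bits
# of a 64-bit word:
#
#	typedef struct { u64 hi, lo; } u128;
#
#	static void gmult_4bit(u64 Xi[2], const u128 Htable[16])
#	{
#		u128   Z;
#		int    cnt = 15;
#		size_t rem, nlo, nhi;
#
#		nlo  = ((const u8 *)Xi)[15];
#		nhi  = nlo >> 4;
#		nlo &= 0xf;
#		Z.hi = Htable[nlo].hi;
#		Z.lo = Htable[nlo].lo;
#
#		while (1) {
#			/* shift Z right by 4 bits, folding the nibble
#			 * shifted out back in via rem_4bit */
#			rem  = (size_t)Z.lo & 0xf;
#			Z.lo = (Z.hi << 60) | (Z.lo >> 4);
#			Z.hi = (Z.hi >> 4) ^ rem_4bit[rem];
#			Z.hi ^= Htable[nhi].hi;
#			Z.lo ^= Htable[nhi].lo;
#
#			if (--cnt < 0) break;
#
#			nlo  = ((const u8 *)Xi)[cnt];
#			nhi  = nlo >> 4;
#			nlo &= 0xf;
#
#			rem  = (size_t)Z.lo & 0xf;
#			Z.lo = (Z.hi << 60) | (Z.lo >> 4);
#			Z.hi = (Z.hi >> 4) ^ rem_4bit[rem];
#			Z.hi ^= Htable[nlo].hi;
#			Z.lo ^= Htable[nlo].lo;
#		}
#
#		Xi[0] = Z.hi;	/* modulo byte order */
#		Xi[1] = Z.lo;
#	}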
#
# October 2012
#
# Add VIS3 lookup-table-free implementation using polynomial
# multiplication xmulx[hi] and extended addition addxc[cc]
# instructions. 4.52/7.63x improvement on T3/T4 or in absolute terms
# 7.90/2.14 cycles per byte. On T4 multi-process benchmark saturates
# at ~15.5x single-process result on 8-core processor, or ~20.5GBps
# per 2.85GHz socket.
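#
# For reference, the VIS3 primitives used below behave roughly as
# follows; rd, rs1 and rs2 are 64-bit integer registers, and
# "carry-less" means XOR multiplication, i.e. multiplication in
# GF(2)[x]:
#
#	xmulx	rs1,rs2,rd	! rd = low 64 bits of carry-less rs1*rs2
#	xmulxhi	rs1,rs2,rd	! rd = high 64 bits of carry-less rs1*rs2
#	addxc	rs1,rs2,rd	! rd = rs1 + rs2 + carry
#	addxccc	rs1,rs2,rd	! same, and sets carry out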

$bits=32;
for (@ARGV)     { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64)  { $bias=2047; $frame=192; }
else            { $bias=0;   $frame=112; }

$output=shift;
open STDOUT,">$output";

$Zhi="%o0";	# 64-bit values
$Zlo="%o1";
$Thi="%o2";
$Tlo="%o3";
$rem="%o4";
$tmp="%o5";

$nhi="%l0";	# small values and pointers
$nlo="%l1";
$xi0="%l2";
$xi1="%l3";
$rem_4bit="%l4";
$remi="%l5";
$Htblo="%l6";
$cnt="%l7";

$Xi="%i0";	# input argument block
$Htbl="%i1";
$inp="%i2";
$len="%i3";

$code.=<<___ if ($bits==64);
.register	%g2,#scratch
.register	%g3,#scratch
___
$code.=<<___;
.section	".text",#alloc,#execinstr

.align	64
! rem_4bit[i] is the reduction constant for the nibble i shifted out
! at each step, pre-shifted into the uppermost 16 bits of a 64-bit word.
rem_4bit:
	.long	`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`,0
	.long	`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`,0
	.long	`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`,0
	.long	`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`,0
.type	rem_4bit,#object
.size	rem_4bit,(.-rem_4bit)
.globl	gcm_ghash_4bit
.align	32
gcm_ghash_4bit:
	save	%sp,-$frame,%sp
	ldub	[$inp+15],$nlo
	ldub	[$Xi+15],$xi0
	ldub	[$Xi+14],$xi1
	add	$len,$inp,$len
	add	$Htbl,8,$Htblo

1:	call	.+8
	add	%o7,rem_4bit-1b,$rem_4bit	! absolute address of rem_4bit

.Louter:
	xor	$xi0,$nlo,$nlo
	and	$nlo,0xf0,$nhi
	and	$nlo,0x0f,$nlo
	sll	$nlo,4,$nlo
	ldx	[$Htblo+$nlo],$Zlo
	ldx	[$Htbl+$nlo],$Zhi

	ldub	[$inp+14],$nlo

	ldx	[$Htblo+$nhi],$Tlo
	and	$Zlo,0xf,$remi
	ldx	[$Htbl+$nhi],$Thi
	sll	$remi,3,$remi
	ldx	[$rem_4bit+$remi],$rem
	srlx	$Zlo,4,$Zlo
	mov	13,$cnt				! byte counter, 13 down to 0
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo

	xor	$xi1,$nlo,$nlo
	and	$Zlo,0xf,$remi
	and	$nlo,0xf0,$nhi
	and	$nlo,0x0f,$nlo
	ba	.Lghash_inner
	sll	$nlo,4,$nlo
.align	32
.Lghash_inner:
	ldx	[$Htblo+$nlo],$Tlo
	sll	$remi,3,$remi
	xor	$Thi,$Zhi,$Zhi
	ldx	[$Htbl+$nlo],$Thi
	srlx	$Zlo,4,$Zlo
	xor	$rem,$Zhi,$Zhi
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	ldub	[$inp+$cnt],$nlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	ldub	[$Xi+$cnt],$xi1
	xor	$Thi,$Zhi,$Zhi
	and	$Zlo,0xf,$remi

	ldx	[$Htblo+$nhi],$Tlo
	sll	$remi,3,$remi
	xor	$rem,$Zhi,$Zhi
	ldx	[$Htbl+$nhi],$Thi
	srlx	$Zlo,4,$Zlo
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$xi1,$nlo,$nlo
	srlx	$Zhi,4,$Zhi
	and	$nlo,0xf0,$nhi
	addcc	$cnt,-1,$cnt
	xor	$Zlo,$tmp,$Zlo
	and	$nlo,0x0f,$nlo
	xor	$Tlo,$Zlo,$Zlo
	sll	$nlo,4,$nlo
	blu	.Lghash_inner
	and	$Zlo,0xf,$remi

	ldx	[$Htblo+$nlo],$Tlo
	sll	$remi,3,$remi
	xor	$Thi,$Zhi,$Zhi
	ldx	[$Htbl+$nlo],$Thi
	srlx	$Zlo,4,$Zlo
	xor	$rem,$Zhi,$Zhi
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	xor	$Thi,$Zhi,$Zhi

	add	$inp,16,$inp
	cmp	$inp,$len			! done with input?
	be,pn	`$bits==64?"%xcc":"%icc"`,.Ldone
	and	$Zlo,0xf,$remi

	ldx	[$Htblo+$nhi],$Tlo
	sll	$remi,3,$remi
	xor	$rem,$Zhi,$Zhi
	ldx	[$Htbl+$nhi],$Thi
	srlx	$Zlo,4,$Zlo
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	ldub	[$inp+15],$nlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	xor	$Thi,$Zhi,$Zhi
	stx	$Zlo,[$Xi+8]
	xor	$rem,$Zhi,$Zhi
	stx	$Zhi,[$Xi]

	srl	$Zlo,8,$xi1
	and	$Zlo,0xff,$xi0			! Xi[15] for next .Louter round
	ba	.Louter
	and	$xi1,0xff,$xi1			! Xi[14]

.align	32
.Ldone:
	ldx	[$Htblo+$nhi],$Tlo
	sll	$remi,3,$remi
	xor	$rem,$Zhi,$Zhi
	ldx	[$Htbl+$nhi],$Thi
	srlx	$Zlo,4,$Zlo
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	xor	$Thi,$Zhi,$Zhi
	stx	$Zlo,[$Xi+8]
	xor	$rem,$Zhi,$Zhi
	stx	$Zhi,[$Xi]

	ret
	restore
.type	gcm_ghash_4bit,#function
.size	gcm_ghash_4bit,(.-gcm_ghash_4bit)
___

undef $inp;
undef $len;

$code.=<<___;
.globl	gcm_gmult_4bit
.align	32
gcm_gmult_4bit:
	save	%sp,-$frame,%sp
	ldub	[$Xi+15],$nlo
	add	$Htbl,8,$Htblo

1:	call	.+8
	add	%o7,rem_4bit-1b,$rem_4bit	! absolute address of rem_4bit

	and	$nlo,0xf0,$nhi
	and	$nlo,0x0f,$nlo
	sll	$nlo,4,$nlo
	ldx	[$Htblo+$nlo],$Zlo
	ldx	[$Htbl+$nlo],$Zhi

	ldub	[$Xi+14],$nlo

	ldx	[$Htblo+$nhi],$Tlo
	and	$Zlo,0xf,$remi
	ldx	[$Htbl+$nhi],$Thi
	sll	$remi,3,$remi
	ldx	[$rem_4bit+$remi],$rem
	srlx	$Zlo,4,$Zlo
	mov	13,$cnt				! byte counter, 13 down to 0
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo

	and	$Zlo,0xf,$remi
	and	$nlo,0xf0,$nhi
	and	$nlo,0x0f,$nlo
	ba	.Lgmult_inner
	sll	$nlo,4,$nlo
.align	32
.Lgmult_inner:
	ldx	[$Htblo+$nlo],$Tlo
	sll	$remi,3,$remi
	xor	$Thi,$Zhi,$Zhi
	ldx	[$Htbl+$nlo],$Thi
	srlx	$Zlo,4,$Zlo
	xor	$rem,$Zhi,$Zhi
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	ldub	[$Xi+$cnt],$nlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	xor	$Thi,$Zhi,$Zhi
	and	$Zlo,0xf,$remi

	ldx	[$Htblo+$nhi],$Tlo
	sll	$remi,3,$remi
	xor	$rem,$Zhi,$Zhi
	ldx	[$Htbl+$nhi],$Thi
	srlx	$Zlo,4,$Zlo
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	srlx	$Zhi,4,$Zhi
	and	$nlo,0xf0,$nhi
	addcc	$cnt,-1,$cnt
	xor	$Zlo,$tmp,$Zlo
	and	$nlo,0x0f,$nlo
	xor	$Tlo,$Zlo,$Zlo
	sll	$nlo,4,$nlo
	blu	.Lgmult_inner
	and	$Zlo,0xf,$remi

	ldx	[$Htblo+$nlo],$Tlo
	sll	$remi,3,$remi
	xor	$Thi,$Zhi,$Zhi
	ldx	[$Htbl+$nlo],$Thi
	srlx	$Zlo,4,$Zlo
	xor	$rem,$Zhi,$Zhi
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	xor	$Thi,$Zhi,$Zhi
	and	$Zlo,0xf,$remi

	ldx	[$Htblo+$nhi],$Tlo
	sll	$remi,3,$remi
	xor	$rem,$Zhi,$Zhi
	ldx	[$Htbl+$nhi],$Thi
	srlx	$Zlo,4,$Zlo
	ldx	[$rem_4bit+$remi],$rem
	sllx	$Zhi,60,$tmp
	xor	$Tlo,$Zlo,$Zlo
	srlx	$Zhi,4,$Zhi
	xor	$Zlo,$tmp,$Zlo
	xor	$Thi,$Zhi,$Zhi
	stx	$Zlo,[$Xi+8]
	xor	$rem,$Zhi,$Zhi
	stx	$Zhi,[$Xi]

	ret
	restore
.type	gcm_gmult_4bit,#function
.size	gcm_gmult_4bit,(.-gcm_gmult_4bit)
___
{{{
# Straightforward 128x128-bit multiplication using Karatsuba algorithm
# followed by a pair of 64-bit reductions [with a shortcut in the first
# one, which breaks the dependency between the reductions and removes
# one multiplication from the critical path]. While it might be
# suboptimal with regard to the sheer number of multiplications, other
# methods [such as aggregate reduction] would require more 64-bit
# registers, which we don't have in a 32-bit application context.
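#
# A minimal sketch of the identity in question: with X=Xhi*2^64+Xlo,
# H=Hhi*2^64+Hlo and all arithmetic carry-less, so that "+" is xor:
#
#	X*H = Xhi*Hhi*2^128
#	    + [(Xhi+Xlo)*(Hhi+Hlo) + Xhi*Hhi + Xlo*Hlo]*2^64
#	    + Xlo*Hlo
#
# i.e. three 64x64-bit multiplications instead of four, each performed
# below as an xmulx/xmulxhi pair.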

($Xip,$Htable,$inp,$len)=map("%i$_",(0..3));

($Hhl,$Hlo,$Hhi,$Xlo,$Xhi,$xE1,$sqr, $C0,$C1,$C2,$C3,$V)=
	(map("%o$_",(0..5,7)),map("%g$_",(1..5)));

($shl,$shr)=map("%l$_",(0..7));

# For details regarding "twisted H" see ghash-x86.pl.
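# In brief [and treating ghash-x86.pl as the authoritative account]:
# gcm_init_vis3 below stores H pre-multiplied by x, i.e. H<<1 mod P.
# With bit-reflected operands a carry-less product comes out misaligned
# by one bit, and pre-twisting H absorbs that shift; it also appears to
# be why the reduction constant below is 0xE1<<57 rather than 0xE1<<56.
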
$code.=<<___;
.globl	gcm_init_vis3
.align	32
gcm_init_vis3:
	save	%sp,-$frame,%sp

	ldx	[%i1+0],$Hhi
	ldx	[%i1+8],$Hlo
	mov	0xE1,$Xhi
	mov	1,$Xlo
	sllx	$Xhi,57,$Xhi
	srax	$Hhi,63,$C0		! broadcast carry
	addcc	$Hlo,$Hlo,$Hlo		! H<<=1
	addxc	$Hhi,$Hhi,$Hhi
	and	$C0,$Xlo,$Xlo
	and	$C0,$Xhi,$Xhi
	xor	$Xlo,$Hlo,$Hlo
	xor	$Xhi,$Hhi,$Hhi
	stx	$Hlo,[%i0+8]		! save twisted H
	stx	$Hhi,[%i0+0]

	sethi	%hi(0xA0406080),$V
	sethi	%hi(0x20C0E000),%l0
	or	$V,%lo(0xA0406080),$V
	or	%l0,%lo(0x20C0E000),%l0
	sllx	$V,32,$V
	or	%l0,$V,$V		! (0xE0·i)&0xff=0xA040608020C0E000
	stx	$V,[%i0+16]

	ret
	restore
.type	gcm_init_vis3,#function
.size	gcm_init_vis3,.-gcm_init_vis3
.globl	gcm_gmult_vis3
.align	32
gcm_gmult_vis3:
	save	%sp,-$frame,%sp

	ldx	[$Xip+8],$Xlo		! load Xi
	ldx	[$Xip+0],$Xhi
	ldx	[$Htable+8],$Hlo	! load twisted H
	ldx	[$Htable+0],$Hhi
	mov	0xE1,%l7
	sllx	%l7,57,$xE1		! 57 is not a typo
	ldx	[$Htable+16],$V		! (0xE0·i)&0xff=0xA040608020C0E000

	xor	$Hhi,$Hlo,$Hhl		! Karatsuba pre-processing
	xmulx	$Xlo,$Hlo,$C0
	xor	$Xlo,$Xhi,$C2		! Karatsuba pre-processing
	xmulx	$C2,$Hhl,$C1
	xmulxhi	$Xlo,$Hlo,$Xlo
	xmulxhi	$C2,$Hhl,$C2
	xmulxhi	$Xhi,$Hhi,$C3
	xmulx	$Xhi,$Hhi,$Xhi

	sll	$C0,3,$sqr
	srlx	$V,$sqr,$sqr		! ·0xE0 [implicit &(7<<3)]
	xor	$C0,$sqr,$sqr
	sllx	$sqr,57,$sqr		! ($C0·0xE1)<<1<<56 [implicit &0x7f]

	xor	$C0,$C1,$C1		! Karatsuba post-processing
	xor	$Xlo,$C2,$C2
	xor	$sqr,$Xlo,$Xlo		! real destination is $C1
	xor	$C3,$C2,$C2
	xor	$Xlo,$C1,$C1
	xor	$Xhi,$C2,$C2
	xor	$Xhi,$C1,$C1

	xmulxhi	$C0,$xE1,$Xlo		! ·0xE1<<1<<56
	xor	$C0,$C2,$C2
	xmulx	$C1,$xE1,$C0
	xor	$C1,$C3,$C3
	xmulxhi	$C1,$xE1,$C1

	xor	$Xlo,$C2,$C2
	xor	$C0,$C2,$C2
	xor	$C1,$C3,$C3

	stx	$C2,[$Xip+8]		! save Xi
	stx	$C3,[$Xip+0]

	ret
	restore
.type	gcm_gmult_vis3,#function
.size	gcm_gmult_vis3,.-gcm_gmult_vis3
.globl	gcm_ghash_vis3
.align	32
gcm_ghash_vis3:
	save	%sp,-$frame,%sp

	ldx	[$Xip+8],$C2		! load Xi
	ldx	[$Xip+0],$C3
	ldx	[$Htable+8],$Hlo	! load twisted H
	ldx	[$Htable+0],$Hhi
	mov	0xE1,%l7
	sllx	%l7,57,$xE1		! 57 is not a typo
	ldx	[$Htable+16],$V		! (0xE0·i)&0xff=0xA040608020C0E000

	and	$inp,7,$shl
	andn	$inp,7,$inp
	sll	$shl,3,$shl
	prefetch [$inp+63], 20
	sub	%g0,$shl,$shr

	xor	$Hhi,$Hlo,$Hhl		! Karatsuba pre-processing
.Loop:
	ldx	[$inp+8],$Xlo
	brz,pt	$shl,1f
	ldx	[$inp+0],$Xhi

	ldx	[$inp+16],$C1		! align data
	srlx	$Xlo,$shr,$C0
	sllx	$Xlo,$shl,$Xlo
	sllx	$Xhi,$shl,$Xhi
	srlx	$C1,$shr,$C1
	or	$C0,$Xhi,$Xhi
	or	$C1,$Xlo,$Xlo
1:
	add	$inp,16,$inp
	sub	$len,16,$len
	xor	$C2,$Xlo,$Xlo
	xor	$C3,$Xhi,$Xhi
	prefetch [$inp+63], 20

	xmulx	$Xlo,$Hlo,$C0
	xor	$Xlo,$Xhi,$C2		! Karatsuba pre-processing
	xmulx	$C2,$Hhl,$C1
	xmulxhi	$Xlo,$Hlo,$Xlo
	xmulxhi	$C2,$Hhl,$C2
	xmulxhi	$Xhi,$Hhi,$C3
	xmulx	$Xhi,$Hhi,$Xhi

	sll	$C0,3,$sqr
	srlx	$V,$sqr,$sqr		! ·0xE0 [implicit &(7<<3)]
	xor	$C0,$sqr,$sqr
	sllx	$sqr,57,$sqr		! ($C0·0xE1)<<1<<56 [implicit &0x7f]

	xor	$C0,$C1,$C1		! Karatsuba post-processing
	xor	$Xlo,$C2,$C2
	xor	$sqr,$Xlo,$Xlo		! real destination is $C1
	xor	$C3,$C2,$C2
	xor	$Xlo,$C1,$C1
	xor	$Xhi,$C2,$C2
	xor	$Xhi,$C1,$C1

	xmulxhi	$C0,$xE1,$Xlo		! ·0xE1<<1<<56
	xor	$C0,$C2,$C2
	xmulx	$C1,$xE1,$C0
	xor	$C1,$C3,$C3
	xmulxhi	$C1,$xE1,$C1

	xor	$Xlo,$C2,$C2
	xor	$C0,$C2,$C2
	brnz,pt	$len,.Loop
	xor	$C1,$C3,$C3

	stx	$C2,[$Xip+8]		! save Xi
	stx	$C3,[$Xip+0]

	ret
	restore
.type	gcm_ghash_vis3,#function
.size	gcm_ghash_vis3,.-gcm_ghash_vis3
___
}}}

$code.=<<___;
.asciz	"GHASH for SPARCv9/VIS3, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___

# The purpose of these subroutines is to explicitly encode VIS
# instructions, so that the module can be compiled without specifying
# VIS extensions on the compiler command line, e.g. -xarch=v9 vs.
# -xarch=v9a. The idea is to preserve the option of producing a
# "universal" binary and letting the programmer detect at run-time
# whether the current CPU is VIS-capable.
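#
# By way of a worked example [derived from the field layout in the
# sprintf below, not verified against an independent assembler]:
#
#	xmulx	%o1,%o2,%o0
#
# has rd=8, rs1=9, rs2=10 and opf=0x115, and is therefore emitted as
#
#	.word	0x91b262aa	! xmulx %o1,%o2,%o0
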
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"xmulx"		=> 0x115,
		"xmulxhi"	=> 0x116	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(xmulx[hi]*|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
	 &unvis3($1,$2,$3,$4)
	/ge;

	print $_,"\n";
}

close STDOUT;