/* sljitNativeARM_64.c */
  1. /*
  2. * Stack-less Just-In-Time compiler
  3. *
  4. * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without modification, are
  7. * permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright notice, this list of
  10. * conditions and the following disclaimer.
  11. *
  12. * 2. Redistributions in binary form must reproduce the above copyright notice, this list
  13. * of conditions and the following disclaimer in the documentation and/or other materials
  14. * provided with the distribution.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
  17. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  18. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  19. * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  20. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  21. * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  22. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  23. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  24. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
/* Returns the human-readable name of the platform this backend targets. */
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
	/* "ARM-64" and the SLJIT_CPUINFO suffix are concatenated at compile time. */
	return "ARM-64" SLJIT_CPUINFO;
}
/* Length of an instruction word */
typedef sljit_u32 sljit_ins;

/* Virtual register indices for the zero register and the temporaries
   used internally by the code generator. */
#define TMP_ZERO (0)
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_LR (SLJIT_NUMBER_OF_REGISTERS + 4)
#define TMP_FP (SLJIT_NUMBER_OF_REGISTERS + 5)
#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)

/* r18 - platform register, currently not used */
/* Maps SLJIT integer register indices to AArch64 register numbers.
   Index 0 (TMP_ZERO) maps to 31, which encodes xzr in the instruction
   forms used here. */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
	31, 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 8, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 31, 9, 10, 30, 29
};

/* Maps SLJIT floating point register indices to AArch64 FP register numbers. */
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
	0, 0, 1, 2, 3, 4, 5, 6, 7
};

/* Bit 31 selects the 32 bit (W register) form when XOR-ed into one of
   the 64 bit opcodes defined below (see e.g. (MOVN ^ W_OP) usages). */
#define W_OP (1u << 31)
/* Operand field encoders: place a mapped register number into the
   destination / source fields of an instruction word. */
#define RD(rd) (reg_map[rd])
#define RT(rt) (reg_map[rt])
#define RN(rn) (reg_map[rn] << 5)
#define RT2(rt2) (reg_map[rt2] << 10)
#define RM(rm) (reg_map[rm] << 16)
#define VD(vd) (freg_map[vd])
#define VT(vt) (freg_map[vt])
#define VN(vn) (freg_map[vn] << 5)
#define VM(vm) (freg_map[vm] << 16)
/* --------------------------------------------------------------------- */
/*  Instruction forms                                                    */
/* --------------------------------------------------------------------- */

/* Base encodings of the AArch64 instructions emitted by this backend.
   Register and immediate fields are OR-ed in by the emitters; XOR-ing
   W_OP into a 64 bit encoding produces the 32 bit form. */
#define ADC 0x9a000000
#define ADD 0x8b000000
#define ADDE 0x8b200000
#define ADDI 0x91000000
#define AND 0x8a000000
#define ANDI 0x92000000
#define ASRV 0x9ac02800
/* Branches. */
#define B 0x14000000
#define B_CC 0x54000000
#define BL 0x94000000
#define BLR 0xd63f0000
#define BR 0xd61f0000
#define BRK 0xd4200000
#define CBZ 0xb4000000
#define CLZ 0xdac01000
#define CSEL 0x9a800000
#define CSINC 0x9a800400
#define EOR 0xca000000
#define EORI 0xd2000000
/* Floating point operations. */
#define FABS 0x1e60c000
#define FADD 0x1e602800
#define FCMP 0x1e602000
#define FCVT 0x1e224000
#define FCVTZS 0x9e780000
#define FDIV 0x1e601800
#define FMOV 0x1e604000
#define FMUL 0x1e600800
#define FNEG 0x1e614000
#define FSUB 0x1e603800
/* Loads and stores. */
#define LDRI 0xf9400000
#define LDP 0xa9400000
#define LDP_PRE 0xa9c00000
#define LDR_PRE 0xf8400c00
#define LSLV 0x9ac02000
#define LSRV 0x9ac02400
#define MADD 0x9b000000
/* Wide immediate moves (16 bit chunk loaders). */
#define MOVK 0xf2800000
#define MOVN 0x92800000
#define MOVZ 0xd2800000
#define NOP 0xd503201f
#define ORN 0xaa200000
#define ORR 0xaa000000
#define ORRI 0xb2000000
#define RET 0xd65f0000
#define SBC 0xda000000
#define SBFM 0x93000000
#define SCVTF 0x9e620000
#define SDIV 0x9ac00c00
#define SMADDL 0x9b200000
#define SMULH 0x9b403c00
#define STP 0xa9000000
#define STP_PRE 0xa9800000
#define STRB 0x38206800
#define STRBI 0x39000000
#define STRI 0xf9000000
#define STR_FI 0x3d000000
#define STR_FR 0x3c206800
#define STUR_FI 0x3c000000
#define STURBI 0x38000000
#define SUB 0xcb000000
#define SUBI 0xd1000000
#define SUBS 0xeb000000
#define UBFM 0xd3000000
#define UDIV 0x9ac00800
#define UMULH 0x9bc03c00
/* Appends one 32 bit instruction word to the compiler's code buffer and
   bumps the instruction count. Returns SLJIT_SUCCESS, or an error code
   when the buffer cannot be grown. (The previous comment about delay
   slots was copied from another backend; AArch64 has none.) */
static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
{
	sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
	FAIL_IF(!ptr);
	*ptr = ins;
	compiler->size++;
	return SLJIT_SUCCESS;
}
  134. static SLJIT_INLINE sljit_s32 emit_imm64_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
  135. {
  136. FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
  137. FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 16) & 0xffff) << 5) | (1 << 21)));
  138. FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 32) & 0xffff) << 5) | (2 << 21)));
  139. return push_inst(compiler, MOVK | RD(dst) | ((imm >> 48) << 5) | (3 << 21));
  140. }
/* Chooses the shortest encoding for the jump whose (four word) sequence
   ends at code_ptr + 4 and returns the number of reserved instruction
   words that become unnecessary (0..5). Rewritable jumps always keep
   the full 64 bit absolute form so they can be re-targeted later. */
static SLJIT_INLINE sljit_sw detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
{
	sljit_sw diff;
	sljit_uw target_addr;

	if (jump->flags & SLJIT_REWRITABLE_JUMP) {
		jump->flags |= PATCH_ABS64;
		return 0;
	}

	if (jump->flags & JUMP_ADDR)
		target_addr = jump->u.target;
	else {
		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
		/* Label addresses are not final yet; compute from its word offset. */
		target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
	}

	diff = (sljit_sw)target_addr - (sljit_sw)(code_ptr + 4) - executable_offset;

	if (jump->flags & IS_COND) {
		diff += sizeof(sljit_ins);
		/* +-1 MB range: fold the jump into the preceding conditional
		   instruction by inverting its condition (bit 24 selects
		   CBZ/CBNZ, bit 0 inverts a B.cond condition code). */
		if (diff <= 0xfffff && diff >= -0x100000) {
			code_ptr[-5] ^= (jump->flags & IS_CBZ) ? (0x1 << 24) : 0x1;
			jump->addr -= sizeof(sljit_ins);
			jump->flags |= PATCH_COND;
			return 5;
		}
		diff -= sizeof(sljit_ins);
	}

	/* +-128 MB range: a single B / BL instruction suffices. */
	if (diff <= 0x7ffffff && diff >= -0x8000000) {
		jump->flags |= PATCH_B;
		return 4;
	}

	/* Absolute target fits in 32 bits: MOVZ + one MOVK. The adjustment
	   of code_ptr[-5] shortens the skip distance of the preceding
	   conditional branch accordingly. */
	if (target_addr < 0x100000000l) {
		if (jump->flags & IS_COND)
			code_ptr[-5] -= (2 << 5);
		code_ptr[-2] = code_ptr[0];
		return 2;
	}

	/* Absolute target fits in 48 bits: MOVZ + two MOVKs. */
	if (target_addr < 0x1000000000000l) {
		if (jump->flags & IS_COND)
			code_ptr[-5] -= (1 << 5);
		jump->flags |= PATCH_ABS48;
		code_ptr[-1] = code_ptr[0];
		return 1;
	}

	/* Full 64 bit absolute address: keep all four words. */
	jump->flags |= PATCH_ABS64;
	return 0;
}
  186. static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label)
  187. {
  188. if (max_label < 0x100000000l) {
  189. put_label->flags = 0;
  190. return 2;
  191. }
  192. if (max_label < 0x1000000000000l) {
  193. put_label->flags = 1;
  194. return 1;
  195. }
  196. put_label->flags = 2;
  197. return 0;
  198. }
/* Translates the buffered instruction stream into executable machine
   code. Pass 1 copies the words while resolving label addresses and
   shrinking jump / put_label sequences to their shortest encoding;
   pass 2 patches the final branch targets; finally the address
   constants recorded by put_label are filled in and the instruction
   cache is flushed. Returns the executable code pointer or NULL. */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
	struct sljit_memory_fragment *buf;
	sljit_ins *code;
	sljit_ins *code_ptr;
	sljit_ins *buf_ptr;
	sljit_ins *buf_end;
	sljit_uw word_count;
	sljit_uw next_addr;
	sljit_sw executable_offset;
	sljit_uw addr;
	sljit_s32 dst;

	struct sljit_label *label;
	struct sljit_jump *jump;
	struct sljit_const *const_;
	struct sljit_put_label *put_label;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_generate_code(compiler));
	reverse_buf(compiler);

	/* Allocate the executable buffer for the worst case size. */
	code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins), compiler->exec_allocator_data);
	PTR_FAIL_WITH_EXEC_IF(code);
	buf = compiler->buf;

	code_ptr = code;
	word_count = 0;
	next_addr = 0;
	executable_offset = SLJIT_EXEC_OFFSET(code);

	label = compiler->labels;
	jump = compiler->jumps;
	const_ = compiler->consts;
	put_label = compiler->put_labels;

	/* Pass 1: copy each buffered word, handling any label / jump /
	   const / put_label attached to the current word index. */
	do {
		buf_ptr = (sljit_ins*)buf->memory;
		buf_end = buf_ptr + (buf->used_size >> 2);
		do {
			*code_ptr = *buf_ptr++;
			if (next_addr == word_count) {
				SLJIT_ASSERT(!label || label->size >= word_count);
				SLJIT_ASSERT(!jump || jump->addr >= word_count);
				SLJIT_ASSERT(!const_ || const_->addr >= word_count);
				SLJIT_ASSERT(!put_label || put_label->addr >= word_count);

				/* These structures are ordered by their address. */
				if (label && label->size == word_count) {
					label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
					label->size = code_ptr - code;
					label = label->next;
				}
				if (jump && jump->addr == word_count) {
					/* Shrink the reserved jump sequence when a
					   shorter encoding is possible. */
					jump->addr = (sljit_uw)(code_ptr - 4);
					code_ptr -= detect_jump_type(jump, code_ptr, code, executable_offset);
					jump = jump->next;
				}
				if (const_ && const_->addr == word_count) {
					const_->addr = (sljit_uw)code_ptr;
					const_ = const_->next;
				}
				if (put_label && put_label->addr == word_count) {
					SLJIT_ASSERT(put_label->label);
					put_label->addr = (sljit_uw)(code_ptr - 3);
					code_ptr -= put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size));
					put_label = put_label->next;
				}
				next_addr = compute_next_addr(label, jump, const_, put_label);
			}
			code_ptr ++;
			word_count ++;
		} while (buf_ptr < buf_end);

		buf = buf->next;
	} while (buf);

	/* A label may follow the very last instruction. */
	if (label && label->size == word_count) {
		label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
		label->size = code_ptr - code;
		label = label->next;
	}

	SLJIT_ASSERT(!label);
	SLJIT_ASSERT(!jump);
	SLJIT_ASSERT(!const_);
	SLJIT_ASSERT(!put_label);
	SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size);

	/* Pass 2: patch the branch targets now that all addresses are final. */
	jump = compiler->jumps;
	while (jump) {
		do {
			addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
			buf_ptr = (sljit_ins *)jump->addr;

			if (jump->flags & PATCH_B) {
				/* 26 bit, word-scaled relative branch (B / BL). */
				addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT((sljit_sw)addr <= 0x1ffffff && (sljit_sw)addr >= -0x2000000);
				buf_ptr[0] = ((jump->flags & IS_BL) ? BL : B) | (addr & 0x3ffffff);
				if (jump->flags & IS_COND)
					buf_ptr[-1] -= (4 << 5);
				break;
			}
			if (jump->flags & PATCH_COND) {
				/* 19 bit relative offset of a conditional branch. */
				addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT((sljit_sw)addr <= 0x3ffff && (sljit_sw)addr >= -0x40000);
				buf_ptr[0] = (buf_ptr[0] & ~0xffffe0) | ((addr & 0x7ffff) << 5);
				break;
			}

			SLJIT_ASSERT((jump->flags & (PATCH_ABS48 | PATCH_ABS64)) || addr <= 0xffffffffl);
			SLJIT_ASSERT((jump->flags & PATCH_ABS64) || addr <= 0xffffffffffffl);

			/* Absolute address: rebuild the MOVZ / MOVK sequence,
			   keeping the destination register of the original word. */
			dst = buf_ptr[0] & 0x1f;
			buf_ptr[0] = MOVZ | dst | ((addr & 0xffff) << 5);
			buf_ptr[1] = MOVK | dst | (((addr >> 16) & 0xffff) << 5) | (1 << 21);
			if (jump->flags & (PATCH_ABS48 | PATCH_ABS64))
				buf_ptr[2] = MOVK | dst | (((addr >> 32) & 0xffff) << 5) | (2 << 21);
			if (jump->flags & PATCH_ABS64)
				buf_ptr[3] = MOVK | dst | (((addr >> 48) & 0xffff) << 5) | (3 << 21);
		} while (0);
		jump = jump->next;
	}

	/* Fill in the immediate fields of the put_label move sequences. */
	put_label = compiler->put_labels;
	while (put_label) {
		addr = put_label->label->addr;
		buf_ptr = (sljit_ins *)put_label->addr;

		buf_ptr[0] |= (addr & 0xffff) << 5;
		buf_ptr[1] |= ((addr >> 16) & 0xffff) << 5;
		if (put_label->flags >= 1)
			buf_ptr[2] |= ((addr >> 32) & 0xffff) << 5;
		if (put_label->flags >= 2)
			buf_ptr[3] |= ((addr >> 48) & 0xffff) << 5;
		put_label = put_label->next;
	}

	compiler->error = SLJIT_ERR_COMPILED;
	compiler->executable_offset = executable_offset;
	compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);

	code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
	code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);

	SLJIT_CACHE_FLUSH(code, code_ptr);
	SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
	return code;
}
  329. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
  330. {
  331. switch (feature_type) {
  332. case SLJIT_HAS_FPU:
  333. #ifdef SLJIT_IS_FPU_AVAILABLE
  334. return SLJIT_IS_FPU_AVAILABLE;
  335. #else
  336. /* Available by default. */
  337. return 1;
  338. #endif
  339. case SLJIT_HAS_CLZ:
  340. case SLJIT_HAS_CMOV:
  341. case SLJIT_HAS_PREFETCH:
  342. return 1;
  343. default:
  344. return 0;
  345. }
  346. }
  347. /* --------------------------------------------------------------------- */
  348. /* Core code generator functions. */
  349. /* --------------------------------------------------------------------- */
/* Counts the trailing zero bits of a 64 bit value by binary search and
   stores the count in result; as a side effect value is shifted right
   by that amount. NOTE(review): evaluates both arguments several times
   and expands to multiple statements (not do-while wrapped) — only use
   it as a standalone statement with simple lvalue arguments, as the
   two call sites in logical_imm do. */
#define COUNT_TRAILING_ZERO(value, result) \
	result = 0; \
	if (!(value & 0xffffffff)) { \
		result += 32; \
		value >>= 32; \
	} \
	if (!(value & 0xffff)) { \
		result += 16; \
		value >>= 16; \
	} \
	if (!(value & 0xff)) { \
		result += 8; \
		value >>= 8; \
	} \
	if (!(value & 0xf)) { \
		result += 4; \
		value >>= 4; \
	} \
	if (!(value & 0x3)) { \
		result += 2; \
		value >>= 2; \
	} \
	if (!(value & 0x1)) { \
		result += 1; \
		value >>= 1; \
	}

/* OR-ed into the len argument of logical_imm to request a graceful
   rejection of unencodable all-zero / all-one values instead of an
   assertion failure. */
#define LOGICAL_IMM_CHECK 0x100
/* Tries to encode imm as an AArch64 bitmask immediate (the N:immr:imms
   fields of logical instructions) and returns those field bits, or 0
   when no encoding exists. len is half the operand width in bits:
   32 for a 64 bit operation, 16 for a 32 bit one. */
static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len)
{
	sljit_s32 negated, ones, right;
	sljit_uw mask, uimm;
	sljit_ins ins;

	/* All-zero and all-one values have no bitmask encoding. */
	if (len & LOGICAL_IMM_CHECK) {
		len &= ~LOGICAL_IMM_CHECK;
		if (len == 32 && (imm == 0 || imm == -1))
			return 0;
		if (len == 16 && ((sljit_s32)imm == 0 || (sljit_s32)imm == -1))
			return 0;
	}

	SLJIT_ASSERT((len == 32 && imm != 0 && imm != -1)
		|| (len == 16 && (sljit_s32)imm != 0 && (sljit_s32)imm != -1));

	uimm = (sljit_uw)imm;
	/* Find the smallest repeating element: halve len while the low len
	   bits equal the next len bits. */
	while (1) {
		if (len <= 0) {
			SLJIT_UNREACHABLE();
			return 0;
		}
		mask = ((sljit_uw)1 << len) - 1;
		if ((uimm & mask) != ((uimm >> len) & mask))
			break;
		len >>= 1;
	}
	/* len is now the element size in bits. */
	len <<= 1;

	/* Normalize so that bit 0 of the element is clear; remember whether
	   the value was complemented. */
	negated = 0;
	if (uimm & 0x1) {
		negated = 1;
		uimm = ~uimm;
	}

	if (len < 64)
		uimm &= ((sljit_uw)1 << len) - 1;

	/* Unsigned right shift. */
	COUNT_TRAILING_ZERO(uimm, right);

	/* Signed shift. We also know that the highest bit is set. */
	imm = (sljit_sw)~uimm;
	SLJIT_ASSERT(imm < 0);

	COUNT_TRAILING_ZERO(imm, ones);

	/* A valid element is one contiguous run of ones; any bits left
	   after shifting that run out means the value is not encodable. */
	if (~imm)
		return 0;

	/* N bit (64 bit element) or the element-size marker in imms. */
	if (len == 64)
		ins = 1 << 22;
	else
		ins = (0x3f - ((len << 1) - 1)) << 10;

	/* Merge the rotation (immr, bits 16..21) and the run length
	   (imms, bits 10..15). */
	if (negated)
		return ins | ((len - ones - 1) << 10) | ((len - ones - right) << 16);

	return ins | ((ones - 1) << 10) | ((len - right) << 16);
}
  426. #undef COUNT_TRAILING_ZERO
/* Emits the shortest instruction sequence that loads simm into dst:
   a single MOVZ / MOVN, an ORR with a bitmask immediate, or a chain of
   MOVZ / MOVN followed by MOVK patches (skipping trivial half-words). */
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw simm)
{
	sljit_uw imm = (sljit_uw)simm;
	sljit_s32 i, zeros, ones, first;
	sljit_ins bitmask;

	/* Handling simple immediates first. */
	if (imm <= 0xffff)
		return push_inst(compiler, MOVZ | RD(dst) | (imm << 5));

	if (simm < 0 && simm >= -0x10000)
		return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5));

	if (imm <= 0xffffffffl) {
		/* 32 bit values: try the single-instruction forms first. */
		if ((imm & 0xffff) == 0)
			return push_inst(compiler, MOVZ | RD(dst) | ((imm >> 16) << 5) | (1 << 21));
		if ((imm & 0xffff0000l) == 0xffff0000)
			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff) << 5));
		if ((imm & 0xffff) == 0xffff)
			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));

		bitmask = logical_imm(simm, 16);
		if (bitmask != 0)
			return push_inst(compiler, (ORRI ^ W_OP) | RD(dst) | RN(TMP_ZERO) | bitmask);

		/* Fall back to MOVZ + MOVK. */
		FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
	}

	bitmask = logical_imm(simm, 32);
	if (bitmask != 0)
		return push_inst(compiler, ORRI | RD(dst) | RN(TMP_ZERO) | bitmask);

	/* Negative values within 32 bits: MOVN based forms. */
	if (simm < 0 && simm >= -0x100000000l) {
		if ((imm & 0xffff) == 0xffff)
			return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));

		FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5)));
		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
	}

	/* A large amount of number can be constructed from ORR and MOVx,
	   but computing them is costly. */

	/* Count the all-zero and all-one half-words to decide whether a
	   MOVZ based or a MOVN based sequence needs fewer instructions. */
	zeros = 0;
	ones = 0;
	for (i = 4; i > 0; i--) {
		if ((simm & 0xffff) == 0)
			zeros++;
		if ((simm & 0xffff) == 0xffff)
			ones++;
		simm >>= 16;
	}

	simm = (sljit_sw)imm;
	first = 1;
	if (ones > zeros) {
		/* MOVN based sequence: work on the complement and skip the
		   half-words that are all ones in the original value. */
		simm = ~simm;
		for (i = 0; i < 4; i++) {
			if (!(simm & 0xffff)) {
				simm >>= 16;
				continue;
			}
			if (first) {
				first = 0;
				FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
			}
			else
				FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((~simm & 0xffff) << 5) | (i << 21)));
			simm >>= 16;
		}
		return SLJIT_SUCCESS;
	}

	/* MOVZ based sequence: skip the all-zero half-words. */
	for (i = 0; i < 4; i++) {
		if (!(simm & 0xffff)) {
			simm >>= 16;
			continue;
		}
		if (first) {
			first = 0;
			FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
		}
		else
			FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
		simm >>= 16;
	}
	return SLJIT_SUCCESS;
}
/* emit_op_imm flag bits; the low 16 bits of flags hold the SLJIT opcode. */
#define ARG1_IMM 0x0010000
#define ARG2_IMM 0x0020000
#define INT_OP 0x0040000
#define SET_FLAGS 0x0080000
#define UNUSED_RETURN 0x0100000

/* When SET_FLAGS is requested, merge the flag-setting bits of the
   instruction into inv_bits and redirect an unused result to the zero
   register. Relies on flags, inv_bits and dst being in scope at the
   call site (used only inside emit_op_imm). */
#define CHECK_FLAGS(flag_bits) \
	if (flags & SET_FLAGS) { \
		inv_bits |= flag_bits; \
		if (flags & UNUSED_RETURN) \
			dst = TMP_ZERO; \
	}
  514. static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 dst, sljit_sw arg1, sljit_sw arg2)
  515. {
  516. /* dst must be register, TMP_REG1
  517. arg1 must be register, TMP_REG1, imm
  518. arg2 must be register, TMP_REG2, imm */
  519. sljit_ins inv_bits = (flags & INT_OP) ? W_OP : 0;
  520. sljit_ins inst_bits;
  521. sljit_s32 op = (flags & 0xffff);
  522. sljit_s32 reg;
  523. sljit_sw imm, nimm;
  524. if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) {
  525. /* Both are immediates. */
  526. flags &= ~ARG1_IMM;
  527. if (arg1 == 0 && op != SLJIT_ADD && op != SLJIT_SUB)
  528. arg1 = TMP_ZERO;
  529. else {
  530. FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
  531. arg1 = TMP_REG1;
  532. }
  533. }
  534. if (flags & (ARG1_IMM | ARG2_IMM)) {
  535. reg = (flags & ARG2_IMM) ? arg1 : arg2;
  536. imm = (flags & ARG2_IMM) ? arg2 : arg1;
  537. switch (op) {
  538. case SLJIT_MUL:
  539. case SLJIT_NEG:
  540. case SLJIT_CLZ:
  541. case SLJIT_ADDC:
  542. case SLJIT_SUBC:
  543. /* No form with immediate operand (except imm 0, which
  544. is represented by a ZERO register). */
  545. break;
  546. case SLJIT_MOV:
  547. SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1);
  548. return load_immediate(compiler, dst, imm);
  549. case SLJIT_NOT:
  550. SLJIT_ASSERT(flags & ARG2_IMM);
  551. FAIL_IF(load_immediate(compiler, dst, (flags & INT_OP) ? (~imm & 0xffffffff) : ~imm));
  552. goto set_flags;
  553. case SLJIT_SUB:
  554. if (flags & ARG1_IMM)
  555. break;
  556. imm = -imm;
  557. /* Fall through. */
  558. case SLJIT_ADD:
  559. compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
  560. if (imm == 0) {
  561. CHECK_FLAGS(1 << 29);
  562. return push_inst(compiler, ((op == SLJIT_ADD ? ADDI : SUBI) ^ inv_bits) | RD(dst) | RN(reg));
  563. }
  564. if (imm > 0 && imm <= 0xfff) {
  565. CHECK_FLAGS(1 << 29);
  566. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (imm << 10));
  567. }
  568. nimm = -imm;
  569. if (nimm > 0 && nimm <= 0xfff) {
  570. CHECK_FLAGS(1 << 29);
  571. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (nimm << 10));
  572. }
  573. if (imm > 0 && imm <= 0xffffff && !(imm & 0xfff)) {
  574. CHECK_FLAGS(1 << 29);
  575. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22));
  576. }
  577. if (nimm > 0 && nimm <= 0xffffff && !(nimm & 0xfff)) {
  578. CHECK_FLAGS(1 << 29);
  579. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22));
  580. }
  581. if (imm > 0 && imm <= 0xffffff && !(flags & SET_FLAGS)) {
  582. FAIL_IF(push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22)));
  583. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(dst) | ((imm & 0xfff) << 10));
  584. }
  585. if (nimm > 0 && nimm <= 0xffffff && !(flags & SET_FLAGS)) {
  586. FAIL_IF(push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22)));
  587. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(dst) | ((nimm & 0xfff) << 10));
  588. }
  589. break;
  590. case SLJIT_AND:
  591. inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
  592. if (!inst_bits)
  593. break;
  594. CHECK_FLAGS(3 << 29);
  595. return push_inst(compiler, (ANDI ^ inv_bits) | RD(dst) | RN(reg) | inst_bits);
  596. case SLJIT_OR:
  597. case SLJIT_XOR:
  598. inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
  599. if (!inst_bits)
  600. break;
  601. if (op == SLJIT_OR)
  602. inst_bits |= ORRI;
  603. else
  604. inst_bits |= EORI;
  605. FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg)));
  606. goto set_flags;
  607. case SLJIT_SHL:
  608. if (flags & ARG1_IMM)
  609. break;
  610. if (flags & INT_OP) {
  611. imm &= 0x1f;
  612. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | ((-imm & 0x1f) << 16) | ((31 - imm) << 10)));
  613. }
  614. else {
  615. imm &= 0x3f;
  616. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | ((-imm & 0x3f) << 16) | ((63 - imm) << 10)));
  617. }
  618. goto set_flags;
  619. case SLJIT_LSHR:
  620. case SLJIT_ASHR:
  621. if (flags & ARG1_IMM)
  622. break;
  623. if (op == SLJIT_ASHR)
  624. inv_bits |= 1 << 30;
  625. if (flags & INT_OP) {
  626. imm &= 0x1f;
  627. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (imm << 16) | (31 << 10)));
  628. }
  629. else {
  630. imm &= 0x3f;
  631. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | (imm << 16) | (63 << 10)));
  632. }
  633. goto set_flags;
  634. default:
  635. SLJIT_UNREACHABLE();
  636. break;
  637. }
  638. if (flags & ARG2_IMM) {
  639. if (arg2 == 0)
  640. arg2 = TMP_ZERO;
  641. else {
  642. FAIL_IF(load_immediate(compiler, TMP_REG2, arg2));
  643. arg2 = TMP_REG2;
  644. }
  645. }
  646. else {
  647. if (arg1 == 0)
  648. arg1 = TMP_ZERO;
  649. else {
  650. FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
  651. arg1 = TMP_REG1;
  652. }
  653. }
  654. }
  655. /* Both arguments are registers. */
  656. switch (op) {
  657. case SLJIT_MOV:
  658. case SLJIT_MOV_P:
  659. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  660. if (dst == arg2)
  661. return SLJIT_SUCCESS;
  662. return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  663. case SLJIT_MOV_U8:
  664. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  665. return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (7 << 10));
  666. case SLJIT_MOV_S8:
  667. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  668. if (!(flags & INT_OP))
  669. inv_bits |= 1 << 22;
  670. return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10));
  671. case SLJIT_MOV_U16:
  672. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  673. return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (15 << 10));
  674. case SLJIT_MOV_S16:
  675. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  676. if (!(flags & INT_OP))
  677. inv_bits |= 1 << 22;
  678. return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10));
  679. case SLJIT_MOV_U32:
  680. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  681. if ((flags & INT_OP) && dst == arg2)
  682. return SLJIT_SUCCESS;
  683. return push_inst(compiler, (ORR ^ W_OP) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  684. case SLJIT_MOV_S32:
  685. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  686. if ((flags & INT_OP) && dst == arg2)
  687. return SLJIT_SUCCESS;
  688. return push_inst(compiler, SBFM | (1 << 22) | RD(dst) | RN(arg2) | (31 << 10));
  689. case SLJIT_NOT:
  690. SLJIT_ASSERT(arg1 == TMP_REG1);
  691. FAIL_IF(push_inst(compiler, (ORN ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2)));
  692. break; /* Set flags. */
  693. case SLJIT_NEG:
  694. SLJIT_ASSERT(arg1 == TMP_REG1);
  695. compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
  696. if (flags & SET_FLAGS)
  697. inv_bits |= 1 << 29;
  698. return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  699. case SLJIT_CLZ:
  700. SLJIT_ASSERT(arg1 == TMP_REG1);
  701. return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2));
  702. case SLJIT_ADD:
  703. CHECK_FLAGS(1 << 29);
  704. compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
  705. return push_inst(compiler, (ADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  706. case SLJIT_ADDC:
  707. CHECK_FLAGS(1 << 29);
  708. return push_inst(compiler, (ADC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  709. case SLJIT_SUB:
  710. CHECK_FLAGS(1 << 29);
  711. compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD_SUB;
  712. return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  713. case SLJIT_SUBC:
  714. CHECK_FLAGS(1 << 29);
  715. return push_inst(compiler, (SBC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  716. case SLJIT_MUL:
  717. compiler->status_flags_state = 0;
  718. if (!(flags & SET_FLAGS))
  719. return push_inst(compiler, (MADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO));
  720. if (flags & INT_OP) {
  721. FAIL_IF(push_inst(compiler, SMADDL | RD(dst) | RN(arg1) | RM(arg2) | (31 << 10)));
  722. FAIL_IF(push_inst(compiler, ADD | RD(TMP_LR) | RN(TMP_ZERO) | RM(dst) | (2 << 22) | (31 << 10)));
  723. return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10));
  724. }
  725. FAIL_IF(push_inst(compiler, SMULH | RD(TMP_LR) | RN(arg1) | RM(arg2)));
  726. FAIL_IF(push_inst(compiler, MADD | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO)));
  727. return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10));
  728. case SLJIT_AND:
  729. CHECK_FLAGS(3 << 29);
  730. return push_inst(compiler, (AND ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  731. case SLJIT_OR:
  732. FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  733. break; /* Set flags. */
  734. case SLJIT_XOR:
  735. FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  736. break; /* Set flags. */
  737. case SLJIT_SHL:
  738. FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  739. break; /* Set flags. */
  740. case SLJIT_LSHR:
  741. FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  742. break; /* Set flags. */
  743. case SLJIT_ASHR:
  744. FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  745. break; /* Set flags. */
  746. default:
  747. SLJIT_UNREACHABLE();
  748. return SLJIT_SUCCESS;
  749. }
  750. set_flags:
  751. if (flags & SET_FLAGS)
  752. return push_inst(compiler, (SUBS ^ inv_bits) | RD(TMP_ZERO) | RN(dst) | RM(TMP_ZERO));
  753. return SLJIT_SUCCESS;
  754. }
/* Flag bits for emit_op_mem / emit_fop_mem. */
#define STORE 0x10	/* Store (otherwise load). */
#define SIGNED 0x20	/* Sign-extending load. */

/* Access sizes: the low two bits hold log2 of the size in bytes. */
#define BYTE_SIZE 0x0
#define HALF_SIZE 0x1
#define INT_SIZE 0x2
#define WORD_SIZE 0x3

/* Extracts the size shift from the flags above. */
#define MEM_SIZE_SHIFT(flags) ((flags) & 0x3)
/* Emits a load or store of integer register 'reg' for the memory operand
   [arg + argw]. The size bits of 'flags' select the access width and
   STORE/SIGNED the operation; tmp_reg may be clobbered while forming the
   address. The STRB/STRBI/STURBI base opcodes are converted to the proper
   sized load/store by 'type' (size in bits 30-31, load/sign bits 22-23). */
static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
	sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
	sljit_u32 shift = MEM_SIZE_SHIFT(flags);
	sljit_u32 type = (shift << 30);

	if (!(flags & STORE))
		type |= (flags & SIGNED) ? 0x00800000 : 0x00400000;

	SLJIT_ASSERT(arg & SLJIT_MEM);

	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
		/* [base + index << argw] form. The register offset addressing
		   mode only supports a shift of 0 or the access size. */
		argw &= 0x3;

		if (argw == 0 || argw == shift)
			return push_inst(compiler, STRB | type | RT(reg)
				| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));

		/* Other shifts: compute base + (index << argw) into tmp_reg. */
		FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
		return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg));
	}

	arg &= REG_MASK;

	if (arg == SLJIT_UNUSED) {
		/* Absolute address: load the aligned upper part, then use the
		   remaining bits as a scaled 12 bit unsigned offset. */
		FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~(0xfff << shift)));

		argw = (argw >> shift) & 0xfff;

		return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
	}

	if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
		/* Naturally aligned, non-negative offset. */
		if ((argw >> shift) <= 0xfff) {
			/* Fits the scaled 12 bit unsigned immediate field. */
			return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | (argw << (10 - shift)));
		}

		if (argw <= 0xffffff) {
			/* add tmp, base, #(argw >> 12), lsl #12 and keep the low
			   12 bits as the immediate offset. */
			FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | ((argw >> 12) << 10)));

			argw = ((argw & 0xfff) >> shift);
			return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
		}
	}

	/* Unscaled 9 bit signed offset (LDUR/STUR form). */
	if (argw <= 255 && argw >= -256)
		return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12));

	/* Fallback: build the full offset in tmp_reg, use register offset. */
	FAIL_IF(load_immediate(compiler, tmp_reg, argw));
	return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg));
}
  799. /* --------------------------------------------------------------------- */
  800. /* Entry, exit */
  801. /* --------------------------------------------------------------------- */
/* Emits the function prologue: pushes FP/LR and the saved registers,
   sets up the frame pointer, moves up to three integer arguments into
   S0-S2 and allocates local_size bytes of stack. On Windows the stack
   is grown page by page with probing loads (pages must be touched in
   order so the guard page mechanism works). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 args, i, tmp, offs, prev, saved_regs_size;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	/* Two extra slots for FP and LR; round up to 16 byte alignment. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
	if (saved_regs_size & 0x8)
		saved_regs_size += sizeof(sljit_sw);

	local_size = (local_size + 15) & ~0xf;
	compiler->local_size = local_size + saved_regs_size;

	/* stp fp, lr, [sp, #-saved_regs_size]! (pre-indexed). */
	FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
		| RN(SLJIT_SP) | ((-(saved_regs_size >> 3) & 0x7f) << 15)));

#ifdef _WIN32
	/* Stack probing: keep the current probe address in TMP_REG1. */
	if (local_size >= 4096)
		FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
	else if (local_size > 256)
		FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (local_size << 10)));
#endif

	/* Store the saved registers in pairs (stp); a leftover single
	   register is stored alone (str) after both loops. */
	tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
	prev = -1;
	offs = 2 << 15;
	for (i = SLJIT_S0; i >= tmp; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	if (prev != -1)
		FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));

	/* mov fp, sp (add with zero immediate). */
	FAIL_IF(push_inst(compiler, ADDI | RD(TMP_FP) | RN(SLJIT_SP) | (0 << 10)));

	args = get_arg_count(arg_types);

	/* Copy incoming arguments into the saved registers. */
	if (args >= 1)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0) | RN(TMP_ZERO) | RM(SLJIT_R0)));
	if (args >= 2)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S1) | RN(TMP_ZERO) | RM(SLJIT_R1)));
	if (args >= 3)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S2) | RN(TMP_ZERO) | RM(SLJIT_R2)));

#ifdef _WIN32
	if (local_size >= 4096) {
		if (local_size < 4 * 4096) {
			/* No need for a loop. */
			if (local_size >= 2 * 4096) {
				FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
				FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
				local_size -= 4096;
			}

			/* local_size dropped one page above, so this re-check
			   probes the (originally) third page when needed. */
			if (local_size >= 2 * 4096) {
				FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
				FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
				local_size -= 4096;
			}

			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
			local_size -= 4096;
		}
		else {
			/* Probe loop: TMP_REG2 counts the remaining whole pages;
			   the conditional branch jumps back 3 instructions until
			   the counter reaches zero. */
			FAIL_IF(push_inst(compiler, MOVZ | RD(TMP_REG2) | (((local_size >> 12) - 1) << 5)));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
			FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
			FAIL_IF(push_inst(compiler, SUBI | (1 << 29) | RD(TMP_REG2) | RN(TMP_REG2) | (1 << 10)));
			FAIL_IF(push_inst(compiler, B_CC | ((((sljit_ins) -3) & 0x7ffff) << 5) | 0x1 /* not-equal */));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));

			local_size &= 0xfff;
		}

		if (local_size > 256) {
			FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (local_size << 10)));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
		}
		else if (local_size > 0)
			FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(TMP_REG1) | ((-local_size & 0x1ff) << 12)));

		/* mov sp, tmp_reg1 */
		FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
	}
	else if (local_size > 256) {
		FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
		FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
	}
	else if (local_size > 0)
		FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(SLJIT_SP) | ((-local_size & 0x1ff) << 12)));

#else /* !_WIN32 */

	/* The local_size does not include saved registers size. */
	if (local_size > 0xfff) {
		/* sub sp, sp, #(local_size >> 12), lsl #12 */
		FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
		local_size &= 0xfff;
	}
	if (local_size != 0)
		FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));

#endif /* _WIN32 */

	return SLJIT_SUCCESS;
}
  905. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
  906. sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
  907. sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
  908. {
  909. sljit_s32 saved_regs_size;
  910. CHECK_ERROR();
  911. CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
  912. set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
  913. saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
  914. if (saved_regs_size & 0x8)
  915. saved_regs_size += sizeof(sljit_sw);
  916. compiler->local_size = saved_regs_size + ((local_size + 15) & ~0xf);
  917. return SLJIT_SUCCESS;
  918. }
/* Emits the function epilogue: restores FP/LR and the saved registers,
   releases the stack frame and returns. Mirrors sljit_emit_enter. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 local_size;
	sljit_s32 i, tmp, offs, prev, saved_regs_size;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));

	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

	/* Must match the computation in sljit_emit_enter. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 2);
	if (saved_regs_size & 0x8)
		saved_regs_size += sizeof(sljit_sw);

	local_size = compiler->local_size - saved_regs_size;

	/* Load LR as early as possible. */
	if (local_size == 0)
		FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
	else if (local_size < 63 * sizeof(sljit_sw)) {
		/* ldp fp, lr, [sp, #local_size]! releases locals in one step. */
		FAIL_IF(push_inst(compiler, LDP_PRE | RT(TMP_FP) | RT2(TMP_LR)
			| RN(SLJIT_SP) | (local_size << (15 - 3))));
	}
	else {
		/* Too large for the ldp offset field: add to SP first. */
		if (local_size > 0xfff) {
			FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
			local_size &= 0xfff;
		}
		if (local_size)
			FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));

		FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
	}

	/* Reload saved registers in pairs (ldp); a leftover single
	   register is loaded alone (ldr) after both loops. */
	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	prev = -1;
	offs = 2 << 15;
	for (i = SLJIT_S0; i >= tmp; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	if (prev != -1)
		FAIL_IF(push_inst(compiler, LDRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));

	/* These two can be executed in parallel. */
	FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (saved_regs_size << 10)));
	return push_inst(compiler, RET | RN(TMP_LR));
}
  973. /* --------------------------------------------------------------------- */
  974. /* Operators */
  975. /* --------------------------------------------------------------------- */
/* Zero operand operations (some work on the implicit R0/R1 pair). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
	/* XORing W_OP into an opcode switches it to the 32 bit form. */
	sljit_ins inv_bits = (op & SLJIT_I32_OP) ? W_OP : 0;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op0(compiler, op));

	op = GET_OPCODE(op);
	switch (op) {
	case SLJIT_BREAKPOINT:
		return push_inst(compiler, BRK);
	case SLJIT_NOP:
		return push_inst(compiler, NOP);
	case SLJIT_LMUL_UW:
	case SLJIT_LMUL_SW:
		/* 128 bit product of R0 * R1: save R0 first (the madd below
		   overwrites it), low half to R0, high half to R1. */
		FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
		FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
		return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
	case SLJIT_DIVMOD_UW:
	case SLJIT_DIVMOD_SW:
		/* R0 = R0 / R1 and R1 = R0 % R1: the remainder is rebuilt as
		   dividend - quotient * divisor (madd + sub). */
		FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
		FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
		FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
		return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
	case SLJIT_DIV_UW:
	case SLJIT_DIV_SW:
		return push_inst(compiler, ((op == SLJIT_DIV_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
	case SLJIT_ENDBR:
	case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
		/* Nothing to emit on this target. */
		return SLJIT_SUCCESS;
	}

	return SLJIT_SUCCESS;
}
/* Single operand operations: moves with size/sign conversion, and
   unary ALU operations (NOT, NEG, CLZ, ...) via emit_op_imm. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r, flags, mem_flags;
	sljit_s32 op_flags = GET_ALL_FLAGS(op);

	CHECK_ERROR();
	CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;

	op = GET_OPCODE(op);
	if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
		/* Both operands are registers. */
		if (dst_r != TMP_REG1 && FAST_IS_REG(src))
			return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? INT_OP : 0), dst_r, TMP_REG1, src);

		/* Select the memory access size, and normalize an immediate
		   source to the value range of the move's source type. */
		switch (op) {
		case SLJIT_MOV:
		case SLJIT_MOV_P:
			mem_flags = WORD_SIZE;
			break;
		case SLJIT_MOV_U8:
			mem_flags = BYTE_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u8)srcw;
			break;
		case SLJIT_MOV_S8:
			mem_flags = BYTE_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s8)srcw;
			break;
		case SLJIT_MOV_U16:
			mem_flags = HALF_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u16)srcw;
			break;
		case SLJIT_MOV_S16:
			mem_flags = HALF_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s16)srcw;
			break;
		case SLJIT_MOV_U32:
			mem_flags = INT_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u32)srcw;
			break;
		case SLJIT_MOV_S32:
			mem_flags = INT_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s32)srcw;
			break;
		default:
			SLJIT_UNREACHABLE();
			mem_flags = 0;
			break;
		}

		if (src & SLJIT_IMM)
			FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw));
		else if (!(src & SLJIT_MEM))
			/* Register source with a sized move: forward it directly. */
			dst_r = src;
		else
			FAIL_IF(emit_op_mem(compiler, mem_flags, dst_r, src, srcw, TMP_REG1));

		if (dst & SLJIT_MEM)
			return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
		return SLJIT_SUCCESS;
	}

	flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op_flags & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	if (dst == SLJIT_UNUSED)
		flags |= UNUSED_RETURN;

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src, srcw, TMP_REG2));
		src = TMP_REG2;
	}

	/* NOTE(review): the return value is not checked here (no FAIL_IF),
	   unlike the paths above; presumably any error is latched in the
	   compiler and caught later -- confirm against push_inst/FAIL_IF. */
	emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, src);

	if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
		return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
/* Two operand ALU operations; operands may be registers, immediates
   or memory. emit_op_imm receives either an immediate value or a
   register number in src1w/src2w, selected by the ARG*_IMM flags. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, flags, mem_flags;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	/* Nothing observable to compute. */
	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
		return SLJIT_SUCCESS;

	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
	flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	if (dst == SLJIT_UNUSED)
		flags |= UNUSED_RETURN;

	/* Load memory operands into the temporaries. */
	if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, src1, src1w, TMP_REG1));
		src1 = TMP_REG1;
	}

	if (src2 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src2, src2w, TMP_REG2));
		src2 = TMP_REG2;
	}

	/* For register operands pass the register number in the *w slot. */
	if (src1 & SLJIT_IMM)
		flags |= ARG1_IMM;
	else
		src1w = src1;

	if (src2 & SLJIT_IMM)
		flags |= ARG2_IMM;
	else
		src2w = src2;

	/* NOTE(review): return value not checked (no FAIL_IF), matching
	   the pattern in sljit_emit_op1 -- confirm error latching. */
	emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w);

	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
  1133. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
  1134. sljit_s32 src, sljit_sw srcw)
  1135. {
  1136. CHECK_ERROR();
  1137. CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
  1138. ADJUST_LOCAL_OFFSET(src, srcw);
  1139. switch (op) {
  1140. case SLJIT_FAST_RETURN:
  1141. if (FAST_IS_REG(src))
  1142. FAIL_IF(push_inst(compiler, ORR | RD(TMP_LR) | RN(TMP_ZERO) | RM(src)));
  1143. else
  1144. FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw, TMP_REG1));
  1145. return push_inst(compiler, RET | RN(TMP_LR));
  1146. case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
  1147. return SLJIT_SUCCESS;
  1148. case SLJIT_PREFETCH_L1:
  1149. case SLJIT_PREFETCH_L2:
  1150. case SLJIT_PREFETCH_L3:
  1151. case SLJIT_PREFETCH_ONCE:
  1152. SLJIT_ASSERT(reg_map[1] == 0 && reg_map[3] == 2 && reg_map[5] == 4);
  1153. /* The reg_map[op] should provide the appropriate constant. */
  1154. if (op == SLJIT_PREFETCH_L1)
  1155. op = 1;
  1156. else if (op == SLJIT_PREFETCH_L2)
  1157. op = 3;
  1158. else if (op == SLJIT_PREFETCH_L3)
  1159. op = 5;
  1160. else
  1161. op = 2;
  1162. /* Signed word sized load is the prefetch instruction. */
  1163. return emit_op_mem(compiler, WORD_SIZE | SIGNED, op, src, srcw, TMP_REG1);
  1164. }
  1165. return SLJIT_SUCCESS;
  1166. }
/* Returns the hardware register number of an sljit integer register. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
{
	CHECK_REG_INDEX(check_sljit_get_register_index(reg));
	return reg_map[reg];
}
/* Returns the hardware register number of an sljit float register. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
	CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
	return freg_map[reg];
}
/* Emits a raw, caller supplied machine instruction word. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
	void *instruction, sljit_s32 size)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_op_custom(compiler, instruction, size));

	return push_inst(compiler, *(sljit_ins*)instruction);
}
  1184. /* --------------------------------------------------------------------- */
  1185. /* Floating point operators */
  1186. /* --------------------------------------------------------------------- */
/* Floating point counterpart of emit_op_mem: loads or stores float
   register 'reg' for [arg + argw]. Same addressing mode selection;
   TMP_REG1 is used as the address temporary. The STR_FR/STR_FI/STUR_FI
   base opcodes become loads when 'type' has bit 22 set. */
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
	sljit_u32 shift = MEM_SIZE_SHIFT(flags);
	sljit_ins type = (shift << 30);

	SLJIT_ASSERT(arg & SLJIT_MEM);

	if (!(flags & STORE))
		type |= 0x00400000;

	if (arg & OFFS_REG_MASK) {
		/* [base + index << argw]: register offset addressing only
		   supports a shift of 0 or the access size. */
		argw &= 3;

		if (argw == 0 || argw == shift)
			return push_inst(compiler, STR_FR | type | VT(reg)
				| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));

		/* Other shifts: compute the address into TMP_REG1. */
		FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
		return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1));
	}

	arg &= REG_MASK;

	if (arg == SLJIT_UNUSED) {
		/* Absolute address: aligned upper part into TMP_REG1, the
		   rest as a scaled 12 bit unsigned offset. */
		FAIL_IF(load_immediate(compiler, TMP_REG1, argw & ~(0xfff << shift)));

		argw = (argw >> shift) & 0xfff;

		return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
	}

	if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
		/* Fits the scaled 12 bit unsigned immediate field. */
		if ((argw >> shift) <= 0xfff)
			return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | (argw << (10 - shift)));

		if (argw <= 0xffffff) {
			/* add tmp, base, #(argw >> 12), lsl #12; low bits as offset. */
			FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | ((argw >> 12) << 10)));

			argw = ((argw & 0xfff) >> shift);
			return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
		}
	}

	/* Unscaled 9 bit signed offset (LDUR/STUR form). */
	if (argw <= 255 && argw >= -256)
		return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12));

	/* Fallback: full offset in TMP_REG1, register offset addressing. */
	FAIL_IF(load_immediate(compiler, TMP_REG1, argw));
	return push_inst(compiler, STR_FR | type | VT(reg) | RN(arg) | RM(TMP_REG1));
}
/* Converts a float/double to a signed 32/64 bit integer (fcvtzs). */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
	/* Bit 22 switches the source to single precision; W_OP makes the
	   destination a 32 bit register. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64)
		inv_bits |= W_OP;

	if (src & SLJIT_MEM) {
		emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
		src = TMP_FREG1;
	}

	FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src)));

	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
/* Converts a signed 32/64 bit integer to a float/double (scvtf). */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
	/* Bit 22 switches the destination to single precision; W_OP makes
	   the source a 32 bit register. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
		inv_bits |= W_OP;

	if (src & SLJIT_MEM) {
		emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1);
		src = TMP_REG1;
	} else if (src & SLJIT_IMM) {
		/* NOTE(review): this guard can never be true while compiling
		   the ARM64 backend; it looks copied from the x86-64 backend.
		   Presumably harmless here since the 32 bit form of scvtf is
		   selected above for S32 conversions -- confirm upstream. */
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
		if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
			srcw = (sljit_s32)srcw;
#endif
		FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
		src = TMP_REG1;
	}

	FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src)));

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
	return SLJIT_SUCCESS;
}
/* Floating point compare: loads memory operands into the float
   temporaries and emits fcmp (single or double precision). */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	/* Bit 22 selects single precision. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (src1 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
		src2 = TMP_FREG2;
	}

	return push_inst(compiler, (FCMP ^ inv_bits) | VN(src1) | VM(src2));
}
/* Single operand floating point operations: move, negate, absolute
   value and precision conversion. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	sljit_ins inv_bits;

	CHECK_ERROR();

	SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x1) == WORD_SIZE, must_be_one_bit_difference);
	SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);

	/* Bit 22 selects single precision. */
	inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src & SLJIT_MEM) {
		/* For a precision conversion the source has the opposite size
		   (mem_flags ^ 0x1, see the compile assert above). */
		emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw);
		src = dst_r;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV_F64:
		if (src != dst_r) {
			if (dst_r != TMP_FREG1)
				FAIL_IF(push_inst(compiler, (FMOV ^ inv_bits) | VD(dst_r) | VN(src)));
			else
				/* Destination is memory: store directly from src. */
				dst_r = src;
		}
		break;
	case SLJIT_NEG_F64:
		FAIL_IF(push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(src)));
		break;
	case SLJIT_ABS_F64:
		FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src)));
		break;
	case SLJIT_CONV_F64_FROM_F32:
		/* Bit 15 selects the double destination, bit 22 the single. */
		FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
		break;
	}

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, mem_flags | STORE, dst_r, dst, dstw);
	return SLJIT_SUCCESS;
}
/* Two operand floating point arithmetic: add, sub, mul, div. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	/* Bit 22 selects single precision. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	/* A memory destination computes into TMP_FREG1, stored below. */
	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src1 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
		src2 = TMP_FREG2;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_ADD_F64:
		FAIL_IF(push_inst(compiler, (FADD ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_SUB_F64:
		FAIL_IF(push_inst(compiler, (FSUB ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_MUL_F64:
		FAIL_IF(push_inst(compiler, (FMUL ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_DIV_F64:
		FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	}

	if (!(dst & SLJIT_MEM))
		return SLJIT_SUCCESS;
	return emit_fop_mem(compiler, mem_flags | STORE, TMP_FREG1, dst, dstw);
}
  1356. /* --------------------------------------------------------------------- */
  1357. /* Other instructions */
  1358. /* --------------------------------------------------------------------- */
  1359. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  1360. {
  1361. CHECK_ERROR();
  1362. CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
  1363. ADJUST_LOCAL_OFFSET(dst, dstw);
  1364. if (FAST_IS_REG(dst))
  1365. return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(TMP_LR));
  1366. /* Memory. */
  1367. return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw, TMP_REG1);
  1368. }
  1369. /* --------------------------------------------------------------------- */
  1370. /* Conditional instructions */
  1371. /* --------------------------------------------------------------------- */
/* Maps an sljit condition type to an AArch64 condition-code field.
   The returned value is the INVERSE of the requested condition
   (e.g. SLJIT_EQUAL -> 0x1, which is NE): sljit_emit_jump uses it to
   branch over the jump sequence, and the CSINC/CSEL users select the
   alternative operand when it matches. */
static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
	switch (type) {
	case SLJIT_EQUAL:
	case SLJIT_EQUAL_F64:
		return 0x1; /* NE */
	case SLJIT_NOT_EQUAL:
	case SLJIT_NOT_EQUAL_F64:
		return 0x0; /* EQ */
	case SLJIT_LESS:
	case SLJIT_LESS_F64:
		return 0x2; /* CS (unsigned >=) */
	case SLJIT_GREATER_EQUAL:
	case SLJIT_GREATER_EQUAL_F64:
		return 0x3; /* CC (unsigned <) */
	case SLJIT_GREATER:
	case SLJIT_GREATER_F64:
		return 0x9; /* LS (unsigned <=) */
	case SLJIT_LESS_EQUAL:
	case SLJIT_LESS_EQUAL_F64:
		return 0x8; /* HI (unsigned >) */
	case SLJIT_SIG_LESS:
		return 0xa; /* GE (signed >=) */
	case SLJIT_SIG_GREATER_EQUAL:
		return 0xb; /* LT (signed <) */
	case SLJIT_SIG_GREATER:
		return 0xd; /* LE (signed <=) */
	case SLJIT_SIG_LESS_EQUAL:
		return 0xc; /* GT (signed >) */
	case SLJIT_OVERFLOW:
		/* For non add/sub operations overflow is tracked in the Z flag. */
		if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
			return 0x0; /* EQ */
		/* fallthrough */
	case SLJIT_UNORDERED_F64:
		return 0x7; /* VC */
	case SLJIT_NOT_OVERFLOW:
		if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
			return 0x1; /* NE */
		/* fallthrough */
	case SLJIT_ORDERED_F64:
		return 0x6; /* VS */
	default:
		SLJIT_UNREACHABLE();
		return 0xe; /* AL */
	}
}
  1416. SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
  1417. {
  1418. struct sljit_label *label;
  1419. CHECK_ERROR_PTR();
  1420. CHECK_PTR(check_sljit_emit_label(compiler));
  1421. if (compiler->last_label && compiler->last_label->size == compiler->size)
  1422. return compiler->last_label;
  1423. label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
  1424. PTR_FAIL_IF(!label);
  1425. set_label(label, compiler);
  1426. return label;
  1427. }
/* Emits a (possibly conditional) jump whose 64 bit target is loaded
   by a patchable MOVZ/MOVK sequence into TMP_REG1. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
	struct sljit_jump *jump;
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_jump(compiler, type));
	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	type &= 0xff;
	if (type < SLJIT_JUMP) {
		jump->flags |= IS_COND;
		/* Guard branch with the inverted condition (get_cc returns the
		   inverse); imm19 = 6 makes B.cond skip the 5 following
		   instructions (4-word imm load + BR/BLR). */
		PTR_FAIL_IF(push_inst(compiler, B_CC | (6 << 5) | get_cc(compiler, type)));
	}
	else if (type >= SLJIT_FAST_CALL)
		jump->flags |= IS_BL;
	/* Placeholder target (0); patched once addresses are resolved. */
	PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1)));
	return jump;
}
/* Emits a call; arg_types is only consumed by the argument check here,
   the actual branch is produced by sljit_emit_jump. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
	|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	/* Arguments were validated above; suppress the duplicate check
	   inside sljit_emit_jump. */
	compiler->skip_checks = 1;
#endif
	return sljit_emit_jump(compiler, type);
}
/* Emits a compare-against-zero jump (only SLJIT_EQUAL / SLJIT_NOT_EQUAL
   are allowed) using CBZ/CBNZ as a guard over a patchable absolute branch. */
static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump;
	sljit_ins inv_bits = (type & SLJIT_I32_OP) ? W_OP : 0;
	SLJIT_ASSERT((type & 0xff) == SLJIT_EQUAL || (type & 0xff) == SLJIT_NOT_EQUAL);
	ADJUST_LOCAL_OFFSET(src, srcw);
	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	jump->flags |= IS_CBZ | IS_COND;
	/* Move the operand into a register if needed. */
	if (src & SLJIT_MEM) {
		PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
		src = TMP_REG1;
	}
	else if (src & SLJIT_IMM) {
		PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
		src = TMP_REG1;
	}
	SLJIT_ASSERT(FAST_IS_REG(src));
	/* The guard uses the inverted test: toggling bit 24 turns CBZ into
	   CBNZ, so a SLJIT_EQUAL jump is skipped when src != 0.  imm19 = 6
	   branches over the 5 following instructions (imm load + BR). */
	if ((type & 0xff) == SLJIT_EQUAL)
		inv_bits |= 1 << 24;
	PTR_FAIL_IF(push_inst(compiler, (CBZ ^ inv_bits) | (6 << 5) | RT(src)));
	/* Placeholder target (0); patched later. */
	PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, BR | RN(TMP_REG1)));
	return jump;
}
  1487. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
  1488. {
  1489. struct sljit_jump *jump;
  1490. CHECK_ERROR();
  1491. CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
  1492. ADJUST_LOCAL_OFFSET(src, srcw);
  1493. if (!(src & SLJIT_IMM)) {
  1494. if (src & SLJIT_MEM) {
  1495. FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
  1496. src = TMP_REG1;
  1497. }
  1498. return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(src));
  1499. }
  1500. /* These jumps are converted to jump/call instructions when possible. */
  1501. jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
  1502. FAIL_IF(!jump);
  1503. set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
  1504. jump->u.target = srcw;
  1505. FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
  1506. jump->addr = compiler->size;
  1507. return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1));
  1508. }
/* Emits an indirect call; arg_types is only consumed by the argument
   check here, the branch itself is produced by sljit_emit_ijump. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
	|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	/* Arguments were validated above; suppress the duplicate check
	   inside sljit_emit_ijump. */
	compiler->skip_checks = 1;
#endif
	return sljit_emit_ijump(compiler, type, src, srcw);
}
/* Materializes the current condition flags as a 0/1 value: either a
   plain move into dst (op < SLJIT_ADD) or combined with dst through
   the binary operation encoded in op. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 type)
{
	sljit_s32 dst_r, src_r, flags, mem_flags;
	sljit_ins cc;
	CHECK_ERROR();
	CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	cc = get_cc(compiler, type & 0xff);
	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
	if (GET_OPCODE(op) < SLJIT_ADD) {
		/* Plain move: CSINC Rd, xzr, xzr, cc gives 0 when cc (the
		   inverted condition) holds and 1 otherwise, i.e. 1 exactly
		   when the sljit condition is true. */
		FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(dst_r) | RN(TMP_ZERO) | RM(TMP_ZERO)));
		if (dst_r == TMP_REG1) {
			/* dst is a memory operand: store the result. */
			mem_flags = (GET_OPCODE(op) == SLJIT_MOV ? WORD_SIZE : INT_SIZE) | STORE;
			return emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG2);
		}
		return SLJIT_SUCCESS;
	}
	flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;
	if (op & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}
	/* Binary form: load the current dst value when it is in memory. */
	src_r = dst;
	if (dst & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG1));
		src_r = TMP_REG1;
	}
	/* Materialize the condition as 0/1 in TMP_REG2 (see comment above). */
	FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(TMP_ZERO)));
	/* NOTE(review): return value is not checked here; presumably
	   emit_op_imm records failures in compiler->error -- confirm. */
	emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src_r, TMP_REG2);
	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
  1557. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
  1558. sljit_s32 dst_reg,
  1559. sljit_s32 src, sljit_sw srcw)
  1560. {
  1561. sljit_ins inv_bits = (dst_reg & SLJIT_I32_OP) ? W_OP : 0;
  1562. sljit_ins cc;
  1563. CHECK_ERROR();
  1564. CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
  1565. if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
  1566. if (dst_reg & SLJIT_I32_OP)
  1567. srcw = (sljit_s32)srcw;
  1568. FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
  1569. src = TMP_REG1;
  1570. srcw = 0;
  1571. }
  1572. cc = get_cc(compiler, type & 0xff);
  1573. dst_reg &= ~SLJIT_I32_OP;
  1574. return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
  1575. }
/* Emits a pre/post-indexed (writeback) load or store. Only a plain
   base register with a signed 9 bit displacement is supported. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 reg,
	sljit_s32 mem, sljit_sw memw)
{
	sljit_u32 sign = 0, inst;
	CHECK_ERROR();
	CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
	if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
		return SLJIT_ERR_UNSUPPORTED;
	if (type & SLJIT_MEM_SUPP)
		return SLJIT_SUCCESS;
	/* Select the access size (bits 30-31); signed variants set 'sign'
	   and fall through to the unsigned case for the base encoding. */
	switch (type & 0xff) {
	case SLJIT_MOV:
	case SLJIT_MOV_P:
		inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
		break;
	case SLJIT_MOV_S8:
		sign = 1;
		/* fallthrough */
	case SLJIT_MOV_U8:
		inst = STURBI | (MEM_SIZE_SHIFT(BYTE_SIZE) << 30) | 0x400;
		break;
	case SLJIT_MOV_S16:
		sign = 1;
		/* fallthrough */
	case SLJIT_MOV_U16:
		inst = STURBI | (MEM_SIZE_SHIFT(HALF_SIZE) << 30) | 0x400;
		break;
	case SLJIT_MOV_S32:
		sign = 1;
		/* fallthrough */
	case SLJIT_MOV_U32:
		inst = STURBI | (MEM_SIZE_SHIFT(INT_SIZE) << 30) | 0x400;
		break;
	default:
		SLJIT_UNREACHABLE();
		inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
		break;
	}
	/* Loads flip the opc field: sign-extending or plain load. */
	if (!(type & SLJIT_MEM_STORE))
		inst |= sign ? 0x00800000 : 0x00400000;
	/* Pre-index writeback instead of the default post-index form. */
	if (type & SLJIT_MEM_PRE)
		inst |= 0x800;
	return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
}
  1618. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
  1619. sljit_s32 freg,
  1620. sljit_s32 mem, sljit_sw memw)
  1621. {
  1622. sljit_u32 inst;
  1623. CHECK_ERROR();
  1624. CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
  1625. if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
  1626. return SLJIT_ERR_UNSUPPORTED;
  1627. if (type & SLJIT_MEM_SUPP)
  1628. return SLJIT_SUCCESS;
  1629. inst = STUR_FI | 0x80000400;
  1630. if (!(type & SLJIT_F32_OP))
  1631. inst |= 0x40000000;
  1632. if (!(type & SLJIT_MEM_STORE))
  1633. inst |= 0x00400000;
  1634. if (type & SLJIT_MEM_PRE)
  1635. inst |= 0x800;
  1636. return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
  1637. }
/* Computes SP + offset into dst. Small offsets use one or two ADD/SUB
   immediates; larger ones load the offset and add it with the
   extended-register form of ADD. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
	sljit_s32 dst_reg;
	sljit_ins ins;
	CHECK_ERROR();
	CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));
	SLJIT_ASSERT (SLJIT_LOCALS_OFFSET_BASE == 0);
	dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
	if (offset <= 0xffffff && offset >= -0xffffff) {
		/* |offset| fits in 24 bits: at most two 12 bit immediates. */
		ins = ADDI;
		if (offset < 0) {
			offset = -offset;
			ins = SUBI;
		}
		if (offset <= 0xfff)
			FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (offset << 10)));
		else {
			/* High 12 bits first, with the LSL #12 flag (1 << 22);
			   the field starts at bit 10, hence the >> (12 - 10). */
			FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | ((offset & 0xfff000) >> (12 - 10)) | (1 << 22)));
			offset &= 0xfff;
			if (offset != 0)
				FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(dst_reg) | (offset << 10)));
		}
	}
	else {
		FAIL_IF(load_immediate (compiler, dst_reg, offset));
		/* Add extended register form. */
		FAIL_IF(push_inst(compiler, ADDE | (0x3 << 13) | RD(dst_reg) | RN(SLJIT_SP) | RM(dst_reg)));
	}
	if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
		return emit_op_mem(compiler, WORD_SIZE | STORE, dst_reg, dst, dstw, TMP_REG1);
	return SLJIT_SUCCESS;
}
  1670. SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
  1671. {
  1672. struct sljit_const *const_;
  1673. sljit_s32 dst_r;
  1674. CHECK_ERROR_PTR();
  1675. CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
  1676. ADJUST_LOCAL_OFFSET(dst, dstw);
  1677. const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
  1678. PTR_FAIL_IF(!const_);
  1679. set_const(const_, compiler);
  1680. dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  1681. PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, init_value));
  1682. if (dst & SLJIT_MEM)
  1683. PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
  1684. return const_;
  1685. }
  1686. SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  1687. {
  1688. struct sljit_put_label *put_label;
  1689. sljit_s32 dst_r;
  1690. CHECK_ERROR_PTR();
  1691. CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
  1692. ADJUST_LOCAL_OFFSET(dst, dstw);
  1693. dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  1694. PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, 0));
  1695. put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
  1696. PTR_FAIL_IF(!put_label);
  1697. set_put_label(put_label, compiler, 1);
  1698. if (dst & SLJIT_MEM)
  1699. PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
  1700. return put_label;
  1701. }
/* Rewrites a 4-instruction MOVZ/MOVK sequence at addr to load
   new_target, then flushes the instruction cache. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
	sljit_ins* inst = (sljit_ins*)addr;
	sljit_s32 dst;
	SLJIT_UNUSED_ARG(executable_offset);
	/* Make the (writable) mapping of these 4 words modifiable. */
	SLJIT_UPDATE_WX_FLAGS(inst, inst + 4, 0);
	dst = inst[0] & 0x1f;
	/* The patch site must be the expected MOVZ + MOVK pattern. */
	SLJIT_ASSERT((inst[0] & 0xffe00000) == MOVZ && (inst[1] & 0xffe00000) == (MOVK | (1 << 21)));
	/* imm16 lives at bits 5-20; (n << 21) is the hw (shift) field. */
	inst[0] = MOVZ | dst | ((new_target & 0xffff) << 5);
	inst[1] = MOVK | dst | (((new_target >> 16) & 0xffff) << 5) | (1 << 21);
	inst[2] = MOVK | dst | (((new_target >> 32) & 0xffff) << 5) | (2 << 21);
	inst[3] = MOVK | dst | ((new_target >> 48) << 5) | (3 << 21);
	SLJIT_UPDATE_WX_FLAGS(inst, inst + 4, 1);
	/* The cache flush must target the executable mapping. */
	inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
	SLJIT_CACHE_FLUSH(inst, inst + 4);
}
/* Constants use the same patchable MOVZ/MOVK sequence as jump targets,
   so patching is delegated to sljit_set_jump_addr. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
	sljit_set_jump_addr(addr, new_constant, executable_offset);
}