sljitNativeARM_64.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964
  1. /*
  2. * Stack-less Just-In-Time compiler
  3. *
  4. * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without modification, are
  7. * permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright notice, this list of
  10. * conditions and the following disclaimer.
  11. *
  12. * 2. Redistributions in binary form must reproduce the above copyright notice, this list
  13. * of conditions and the following disclaimer in the documentation and/or other materials
  14. * provided with the distribution.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
  17. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  18. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  19. * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  20. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  21. * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  22. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  23. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  24. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
/* Returns a human readable name of the target architecture; SLJIT_CPUINFO
   is a string literal appended at compile time. */
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
	return "ARM-64" SLJIT_CPUINFO;
}
/* Length of an instruction word */
typedef sljit_u32 sljit_ins;

/* Virtual indices of the temporary registers used internally by the
   code generator (beyond the registers visible to sljit users). */
#define TMP_ZERO (0)
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_LR (SLJIT_NUMBER_OF_REGISTERS + 4)
#define TMP_FP (SLJIT_NUMBER_OF_REGISTERS + 5)

#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)

/* Maps sljit register indices to AArch64 register numbers.
   r18 - platform register, currently not used */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
	31, 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 8, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 31, 9, 10, 30, 29
};

/* Maps sljit float register indices to AArch64 SIMD&FP register numbers. */
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
	0, 0, 1, 2, 3, 4, 5, 6, 7
};

/* XOR-ing an opcode with W_OP clears the sf bit, turning the 64-bit form
   of an instruction into its 32-bit form. */
#define W_OP (1u << 31)

/* Helpers that place a register number into the proper bit field of an
   instruction word (R* for general purpose, V* for SIMD&FP registers). */
#define RD(rd) (reg_map[rd])
#define RT(rt) (reg_map[rt])
#define RN(rn) (reg_map[rn] << 5)
#define RT2(rt2) (reg_map[rt2] << 10)
#define RM(rm) (reg_map[rm] << 16)
#define VD(vd) (freg_map[vd])
#define VT(vt) (freg_map[vt])
#define VN(vn) (freg_map[vn] << 5)
#define VM(vm) (freg_map[vm] << 16)

/* --------------------------------------------------------------------- */
/*  Instruction forms                                                    */
/* --------------------------------------------------------------------- */

/* A64 opcodes with all fixed fields preset; register and immediate
   fields (see the macros above) are OR-ed in when emitting. */
#define ADC 0x9a000000
#define ADD 0x8b000000
#define ADDE 0x8b200000
#define ADDI 0x91000000
#define AND 0x8a000000
#define ANDI 0x92000000
#define ASRV 0x9ac02800
#define B 0x14000000
#define B_CC 0x54000000
#define BL 0x94000000
#define BLR 0xd63f0000
#define BR 0xd61f0000
#define BRK 0xd4200000
#define CBZ 0xb4000000
#define CLZ 0xdac01000
#define CSEL 0x9a800000
#define CSINC 0x9a800400
#define EOR 0xca000000
#define EORI 0xd2000000
#define FABS 0x1e60c000
#define FADD 0x1e602800
#define FCMP 0x1e602000
#define FCVT 0x1e224000
#define FCVTZS 0x9e780000
#define FDIV 0x1e601800
#define FMOV 0x1e604000
#define FMUL 0x1e600800
#define FNEG 0x1e614000
#define FSUB 0x1e603800
#define LDRI 0xf9400000
#define LDP 0xa9400000
#define LDP_PRE 0xa9c00000
#define LDR_PRE 0xf8400c00
#define LSLV 0x9ac02000
#define LSRV 0x9ac02400
#define MADD 0x9b000000
#define MOVK 0xf2800000
#define MOVN 0x92800000
#define MOVZ 0xd2800000
#define NOP 0xd503201f
#define ORN 0xaa200000
#define ORR 0xaa000000
#define ORRI 0xb2000000
#define RET 0xd65f0000
#define SBC 0xda000000
#define SBFM 0x93000000
#define SCVTF 0x9e620000
#define SDIV 0x9ac00c00
#define SMADDL 0x9b200000
#define SMULH 0x9b403c00
#define STP 0xa9000000
#define STP_PRE 0xa9800000
#define STRB 0x38206800
#define STRBI 0x39000000
#define STRI 0xf9000000
#define STR_FI 0x3d000000
#define STR_FR 0x3c206800
#define STUR_FI 0x3c000000
#define STURBI 0x38000000
#define SUB 0xcb000000
#define SUBI 0xd1000000
#define SUBS 0xeb000000
#define UBFM 0xd3000000
#define UDIV 0x9ac00800
#define UMULH 0x9bc03c00
/* Emits a single 32-bit instruction word into the code buffer.
   (AArch64 has no branch delay slots; no instruction reordering is needed.) */
  126. static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
  127. {
  128. sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
  129. FAIL_IF(!ptr);
  130. *ptr = ins;
  131. compiler->size++;
  132. return SLJIT_SUCCESS;
  133. }
  134. static SLJIT_INLINE sljit_s32 emit_imm64_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
  135. {
  136. FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
  137. FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 16) & 0xffff) << 5) | (1 << 21)));
  138. FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 32) & 0xffff) << 5) | (2 << 21)));
  139. return push_inst(compiler, MOVK | RD(dst) | ((imm >> 48) << 5) | (3 << 21));
  140. }
  141. static SLJIT_INLINE void modify_imm64_const(sljit_ins* inst, sljit_uw new_imm)
  142. {
  143. sljit_s32 dst = inst[0] & 0x1f;
  144. SLJIT_ASSERT((inst[0] & 0xffe00000) == MOVZ && (inst[1] & 0xffe00000) == (MOVK | (1 << 21)));
  145. inst[0] = MOVZ | dst | ((new_imm & 0xffff) << 5);
  146. inst[1] = MOVK | dst | (((new_imm >> 16) & 0xffff) << 5) | (1 << 21);
  147. inst[2] = MOVK | dst | (((new_imm >> 32) & 0xffff) << 5) | (2 << 21);
  148. inst[3] = MOVK | dst | ((new_imm >> 48) << 5) | (3 << 21);
  149. }
/* Decides the shortest encoding for a jump.  Every jump is emitted in the
   worst-case form (a 4-word MOVZ/MOVK address load followed by a branch);
   this function examines the distance to the target and returns how many
   instruction words can be dropped again (0-5).  The chosen form is
   recorded in jump->flags (PATCH_B / PATCH_COND / PATCH_ABS48 /
   PATCH_ABS64) for the later patch pass in sljit_generate_code. */
static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
{
	sljit_sw diff;
	sljit_uw target_addr;

	/* Rewritable jumps must keep the full absolute form so the target
	   can be replaced later. */
	if (jump->flags & SLJIT_REWRITABLE_JUMP) {
		jump->flags |= PATCH_ABS64;
		return 0;
	}

	if (jump->flags & JUMP_ADDR)
		target_addr = jump->u.target;
	else {
		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
		target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
	}

	/* Byte distance measured from the end of the 4-word address load. */
	diff = (sljit_sw)target_addr - (sljit_sw)(code_ptr + 4) - executable_offset;

	if (jump->flags & IS_COND) {
		diff += sizeof(sljit_ins);
		/* Fits in a conditional branch (+-1MB range)?  Then invert the
		   preceding B.cc / CBZ so it branches to the target directly and
		   drop the whole 5-word sequence. */
		if (diff <= 0xfffff && diff >= -0x100000) {
			code_ptr[-5] ^= (jump->flags & IS_CBZ) ? (0x1 << 24) : 0x1;
			jump->addr -= sizeof(sljit_ins);
			jump->flags |= PATCH_COND;
			return 5;
		}
		diff -= sizeof(sljit_ins);
	}

	/* Fits in an unconditional B/BL (+-128MB range): keep one word. */
	if (diff <= 0x7ffffff && diff >= -0x8000000) {
		jump->flags |= PATCH_B;
		return 4;
	}

	/* Absolute address fits in 32 bits: MOVZ + MOVK + branch.
	   The preceding conditional skip distance is shortened accordingly. */
	if (target_addr <= 0xffffffffl) {
		if (jump->flags & IS_COND)
			code_ptr[-5] -= (2 << 5);
		code_ptr[-2] = code_ptr[0];
		return 2;
	}

	/* Absolute address fits in 48 bits: three MOVs + branch. */
	if (target_addr <= 0xffffffffffffl) {
		if (jump->flags & IS_COND)
			code_ptr[-5] -= (1 << 5);
		jump->flags |= PATCH_ABS48;
		code_ptr[-1] = code_ptr[0];
		return 1;
	}

	/* Full 64-bit absolute address: nothing can be removed. */
	jump->flags |= PATCH_ABS64;
	return 0;
}
/* Final code generation pass: copies the recorded instruction words from
   the compiler's memory fragments into a freshly allocated executable
   buffer, resolves label addresses, shrinks jumps where possible (see
   detect_jump_type) and then patches every jump and constant. */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
	struct sljit_memory_fragment *buf;
	sljit_ins *code;
	sljit_ins *code_ptr;
	sljit_ins *buf_ptr;
	sljit_ins *buf_end;
	sljit_uw word_count;
	sljit_sw executable_offset;
	sljit_uw addr;
	sljit_s32 dst;

	struct sljit_label *label;
	struct sljit_jump *jump;
	struct sljit_const *const_;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_generate_code(compiler));
	reverse_buf(compiler);

	/* Worst-case allocation: every emitted word is kept. */
	code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins));
	PTR_FAIL_WITH_EXEC_IF(code);
	buf = compiler->buf;

	code_ptr = code;
	word_count = 0;
	executable_offset = SLJIT_EXEC_OFFSET(code);

	label = compiler->labels;
	jump = compiler->jumps;
	const_ = compiler->consts;

	/* First pass: copy words and record final label/jump/const positions. */
	do {
		buf_ptr = (sljit_ins*)buf->memory;
		buf_end = buf_ptr + (buf->used_size >> 2);
		do {
			*code_ptr = *buf_ptr++;
			/* These structures are ordered by their address. */
			SLJIT_ASSERT(!label || label->size >= word_count);
			SLJIT_ASSERT(!jump || jump->addr >= word_count);
			SLJIT_ASSERT(!const_ || const_->addr >= word_count);
			if (label && label->size == word_count) {
				label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
				label->size = code_ptr - code;
				label = label->next;
			}
			if (jump && jump->addr == word_count) {
				/* code_ptr currently points at the branch word; the
				   4-word address load precedes it.  detect_jump_type
				   tells how many words can be discarded. */
				jump->addr = (sljit_uw)(code_ptr - 4);
				code_ptr -= detect_jump_type(jump, code_ptr, code, executable_offset);
				jump = jump->next;
			}
			if (const_ && const_->addr == word_count) {
				const_->addr = (sljit_uw)code_ptr;
				const_ = const_->next;
			}
			code_ptr ++;
			word_count ++;
		} while (buf_ptr < buf_end);

		buf = buf->next;
	} while (buf);

	/* A label may sit exactly at the end of the last fragment. */
	if (label && label->size == word_count) {
		label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
		label->size = code_ptr - code;
		label = label->next;
	}

	SLJIT_ASSERT(!label);
	SLJIT_ASSERT(!jump);
	SLJIT_ASSERT(!const_);
	SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size);

	/* Second pass: write the final target into each jump site. */
	jump = compiler->jumps;
	while (jump) {
		do {
			addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
			buf_ptr = (sljit_ins *)jump->addr;

			if (jump->flags & PATCH_B) {
				/* PC-relative B/BL with a signed 26-bit word offset. */
				addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT((sljit_sw)addr <= 0x1ffffff && (sljit_sw)addr >= -0x2000000);
				buf_ptr[0] = ((jump->flags & IS_BL) ? BL : B) | (addr & 0x3ffffff);
				if (jump->flags & IS_COND)
					buf_ptr[-1] -= (4 << 5);
				break;
			}
			if (jump->flags & PATCH_COND) {
				/* Conditional branch with a signed 19-bit word offset
				   stored in bits 5-23. */
				addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT((sljit_sw)addr <= 0x3ffff && (sljit_sw)addr >= -0x40000);
				buf_ptr[0] = (buf_ptr[0] & ~0xffffe0) | ((addr & 0x7ffff) << 5);
				break;
			}

			/* Absolute target: rebuild the MOVZ/MOVK sequence; only the
			   halfwords kept by detect_jump_type are written. */
			SLJIT_ASSERT((jump->flags & (PATCH_ABS48 | PATCH_ABS64)) || addr <= 0xffffffffl);
			SLJIT_ASSERT((jump->flags & PATCH_ABS64) || addr <= 0xffffffffffffl);

			dst = buf_ptr[0] & 0x1f;
			buf_ptr[0] = MOVZ | dst | ((addr & 0xffff) << 5);
			buf_ptr[1] = MOVK | dst | (((addr >> 16) & 0xffff) << 5) | (1 << 21);
			if (jump->flags & (PATCH_ABS48 | PATCH_ABS64))
				buf_ptr[2] = MOVK | dst | (((addr >> 32) & 0xffff) << 5) | (2 << 21);
			if (jump->flags & PATCH_ABS64)
				buf_ptr[3] = MOVK | dst | (((addr >> 48) & 0xffff) << 5) | (3 << 21);
		} while (0);
		jump = jump->next;
	}

	compiler->error = SLJIT_ERR_COMPILED;
	compiler->executable_offset = executable_offset;
	compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);

	code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
	code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);

	SLJIT_CACHE_FLUSH(code, code_ptr);
	return code;
}
  297. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
  298. {
  299. switch (feature_type) {
  300. case SLJIT_HAS_FPU:
  301. #ifdef SLJIT_IS_FPU_AVAILABLE
  302. return SLJIT_IS_FPU_AVAILABLE;
  303. #else
  304. /* Available by default. */
  305. return 1;
  306. #endif
  307. case SLJIT_HAS_CLZ:
  308. case SLJIT_HAS_CMOV:
  309. return 1;
  310. default:
  311. return 0;
  312. }
  313. }
/* --------------------------------------------------------------------- */
/*  Core code generator functions.                                       */
/* --------------------------------------------------------------------- */

/* Counts the trailing zero bits of the 64-bit lvalue `value` into
   `result`, shifting `value` right by that amount as a side effect.
   NOTE: both arguments are evaluated multiple times - pass only simple
   lvalues.  Undefined for value == 0 (callers guarantee non-zero). */
#define COUNT_TRAILING_ZERO(value, result) \
	result = 0; \
	if (!(value & 0xffffffff)) { \
		result += 32; \
		value >>= 32; \
	} \
	if (!(value & 0xffff)) { \
		result += 16; \
		value >>= 16; \
	} \
	if (!(value & 0xff)) { \
		result += 8; \
		value >>= 8; \
	} \
	if (!(value & 0xf)) { \
		result += 4; \
		value >>= 4; \
	} \
	if (!(value & 0x3)) { \
		result += 2; \
		value >>= 2; \
	} \
	if (!(value & 0x1)) { \
		result += 1; \
		value >>= 1; \
	}

/* Flag for logical_imm: allow (and reject with 0) the values 0 and -1. */
#define LOGICAL_IMM_CHECK 0x100
/* Tries to encode `imm` as an A64 "bitmask immediate" (the N:immr:imms
   fields used by logical instructions with immediate operand).  `len` is
   the initial half-pattern length: 32 for 64-bit operations, 16 for
   32-bit ones; OR-ing in LOGICAL_IMM_CHECK additionally tolerates the
   unencodable values 0 and -1.  Returns the encoded field bits, or 0
   when the value cannot be represented. */
static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len)
{
	sljit_s32 negated, ones, right;
	sljit_uw mask, uimm;
	sljit_ins ins;

	if (len & LOGICAL_IMM_CHECK) {
		len &= ~LOGICAL_IMM_CHECK;
		/* All-zero / all-one patterns have no bitmask encoding. */
		if (len == 32 && (imm == 0 || imm == -1))
			return 0;
		if (len == 16 && ((sljit_s32)imm == 0 || (sljit_s32)imm == -1))
			return 0;
	}

	SLJIT_ASSERT((len == 32 && imm != 0 && imm != -1)
		|| (len == 16 && (sljit_s32)imm != 0 && (sljit_s32)imm != -1));

	uimm = (sljit_uw)imm;
	/* Halve `len` while the lower and upper halves repeat; the loop
	   leaves `len` at half of the smallest repeating element size. */
	while (1) {
		if (len <= 0) {
			SLJIT_UNREACHABLE();
			return 0;
		}

		mask = ((sljit_uw)1 << len) - 1;
		if ((uimm & mask) != ((uimm >> len) & mask))
			break;
		len >>= 1;
	}

	/* Now `len` is the element size in bits. */
	len <<= 1;

	negated = 0;
	if (uimm & 0x1) {
		negated = 1;
		uimm = ~uimm;
	}

	if (len < 64)
		uimm &= ((sljit_uw)1 << len) - 1;

	/* Unsigned right shift. */
	COUNT_TRAILING_ZERO(uimm, right);

	/* Signed shift. We also know that the highest bit is set. */
	imm = (sljit_sw)~uimm;
	SLJIT_ASSERT(imm < 0);

	COUNT_TRAILING_ZERO(imm, ones);

	/* The element must be a single contiguous run of ones, i.e. the
	   shifted complement must be all ones (-1). */
	if (~imm)
		return 0;

	if (len == 64)
		/* N bit (bit 22) selects a 64-bit element. */
		ins = 1 << 22;
	else
		ins = (0x3f - ((len << 1) - 1)) << 10;

	/* Run length goes into imms (bits 10-15), rotation into immr
	   (bits 16-21). */
	if (negated)
		return ins | ((len - ones - 1) << 10) | ((len - ones - right) << 16);

	return ins | ((ones - 1) << 10) | ((len - right) << 16);
}

#undef COUNT_TRAILING_ZERO
/* Loads the signed immediate `simm` into register `dst` using the
   shortest sequence found: a single MOVZ/MOVN/ORR-immediate when
   possible, otherwise a MOVZ- or MOVN-anchored chain of MOVK
   instructions (at most 4 instructions total). */
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw simm)
{
	sljit_uw imm = (sljit_uw)simm;
	sljit_s32 i, zeros, ones, first;
	sljit_ins bitmask;

	/* Handling simple immediates first. */
	if (imm <= 0xffff)
		return push_inst(compiler, MOVZ | RD(dst) | (imm << 5));

	if (simm < 0 && simm >= -0x10000)
		return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5));

	if (imm <= 0xffffffffl) {
		/* 32-bit value: try a single (possibly shifted) MOVZ/MOVN,
		   then a 32-bit bitmask immediate, finally MOVZ + MOVK. */
		if ((imm & 0xffff) == 0)
			return push_inst(compiler, MOVZ | RD(dst) | ((imm >> 16) << 5) | (1 << 21));
		if ((imm & 0xffff0000l) == 0xffff0000)
			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff) << 5));
		if ((imm & 0xffff) == 0xffff)
			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));

		bitmask = logical_imm(simm, 16);
		if (bitmask != 0)
			return push_inst(compiler, (ORRI ^ W_OP) | RD(dst) | RN(TMP_ZERO) | bitmask);

		FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
	}

	/* 64-bit bitmask immediate (single ORR against the zero register). */
	bitmask = logical_imm(simm, 32);
	if (bitmask != 0)
		return push_inst(compiler, ORRI | RD(dst) | RN(TMP_ZERO) | bitmask);

	/* Negative value whose complement fits in 32 bits: MOVN (+ MOVK). */
	if (simm < 0 && simm >= -0x100000000l) {
		if ((imm & 0xffff) == 0xffff)
			return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));

		FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5)));
		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
	}

	/* A large amount of number can be constructed from ORR and MOVx, but computing them is costly. */

	/* Count zero and all-one 16-bit halfwords to choose between a
	   MOVZ-based and a MOVN-based sequence (whichever skips more). */
	zeros = 0;
	ones = 0;
	for (i = 4; i > 0; i--) {
		if ((simm & 0xffff) == 0)
			zeros++;
		if ((simm & 0xffff) == 0xffff)
			ones++;
		simm >>= 16;
	}

	simm = (sljit_sw)imm;
	first = 1;
	if (ones > zeros) {
		/* MOVN-anchored: work on the complement; all-zero halfwords of
		   ~imm (all-one halfwords of imm) need no instruction. */
		simm = ~simm;
		for (i = 0; i < 4; i++) {
			if (!(simm & 0xffff)) {
				simm >>= 16;
				continue;
			}
			if (first) {
				first = 0;
				FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
			}
			else
				FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((~simm & 0xffff) << 5) | (i << 21)));
			simm >>= 16;
		}
		return SLJIT_SUCCESS;
	}

	/* MOVZ-anchored: zero halfwords need no instruction. */
	for (i = 0; i < 4; i++) {
		if (!(simm & 0xffff)) {
			simm >>= 16;
			continue;
		}
		if (first) {
			first = 0;
			FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
		}
		else
			FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
		simm >>= 16;
	}
	return SLJIT_SUCCESS;
}
/* Flag bits combined with the sljit opcode in emit_op_imm's `flags`. */
#define ARG1_IMM 0x0010000
#define ARG2_IMM 0x0020000
#define INT_OP 0x0040000
#define SET_FLAGS 0x0080000
#define UNUSED_RETURN 0x0100000

/* When flag setting is requested, switch to the flag-setting form of the
   instruction (by OR-ing `flag_bits` into inv_bits) and redirect the
   result to the zero register if it is not needed.  Uses the local
   variables `flags`, `inv_bits` and `dst` of the enclosing function. */
#define CHECK_FLAGS(flag_bits) \
	if (flags & SET_FLAGS) { \
		inv_bits |= flag_bits; \
		if (flags & UNUSED_RETURN) \
			dst = TMP_ZERO; \
	}
  481. static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 dst, sljit_sw arg1, sljit_sw arg2)
  482. {
  483. /* dst must be register, TMP_REG1
  484. arg1 must be register, TMP_REG1, imm
  485. arg2 must be register, TMP_REG2, imm */
  486. sljit_ins inv_bits = (flags & INT_OP) ? W_OP : 0;
  487. sljit_ins inst_bits;
  488. sljit_s32 op = (flags & 0xffff);
  489. sljit_s32 reg;
  490. sljit_sw imm, nimm;
  491. if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) {
  492. /* Both are immediates. */
  493. flags &= ~ARG1_IMM;
  494. if (arg1 == 0 && op != SLJIT_ADD && op != SLJIT_SUB)
  495. arg1 = TMP_ZERO;
  496. else {
  497. FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
  498. arg1 = TMP_REG1;
  499. }
  500. }
  501. if (flags & (ARG1_IMM | ARG2_IMM)) {
  502. reg = (flags & ARG2_IMM) ? arg1 : arg2;
  503. imm = (flags & ARG2_IMM) ? arg2 : arg1;
  504. switch (op) {
  505. case SLJIT_MUL:
  506. case SLJIT_NEG:
  507. case SLJIT_CLZ:
  508. case SLJIT_ADDC:
  509. case SLJIT_SUBC:
  510. /* No form with immediate operand (except imm 0, which
  511. is represented by a ZERO register). */
  512. break;
  513. case SLJIT_MOV:
  514. SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1);
  515. return load_immediate(compiler, dst, imm);
  516. case SLJIT_NOT:
  517. SLJIT_ASSERT(flags & ARG2_IMM);
  518. FAIL_IF(load_immediate(compiler, dst, (flags & INT_OP) ? (~imm & 0xffffffff) : ~imm));
  519. goto set_flags;
  520. case SLJIT_SUB:
  521. if (flags & ARG1_IMM)
  522. break;
  523. imm = -imm;
  524. /* Fall through. */
  525. case SLJIT_ADD:
  526. if (imm == 0) {
  527. CHECK_FLAGS(1 << 29);
  528. return push_inst(compiler, ((op == SLJIT_ADD ? ADDI : SUBI) ^ inv_bits) | RD(dst) | RN(reg));
  529. }
  530. if (imm > 0 && imm <= 0xfff) {
  531. CHECK_FLAGS(1 << 29);
  532. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (imm << 10));
  533. }
  534. nimm = -imm;
  535. if (nimm > 0 && nimm <= 0xfff) {
  536. CHECK_FLAGS(1 << 29);
  537. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (nimm << 10));
  538. }
  539. if (imm > 0 && imm <= 0xffffff && !(imm & 0xfff)) {
  540. CHECK_FLAGS(1 << 29);
  541. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22));
  542. }
  543. if (nimm > 0 && nimm <= 0xffffff && !(nimm & 0xfff)) {
  544. CHECK_FLAGS(1 << 29);
  545. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22));
  546. }
  547. if (imm > 0 && imm <= 0xffffff && !(flags & SET_FLAGS)) {
  548. FAIL_IF(push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22)));
  549. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(dst) | ((imm & 0xfff) << 10));
  550. }
  551. if (nimm > 0 && nimm <= 0xffffff && !(flags & SET_FLAGS)) {
  552. FAIL_IF(push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22)));
  553. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(dst) | ((nimm & 0xfff) << 10));
  554. }
  555. break;
  556. case SLJIT_AND:
  557. inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
  558. if (!inst_bits)
  559. break;
  560. CHECK_FLAGS(3 << 29);
  561. return push_inst(compiler, (ANDI ^ inv_bits) | RD(dst) | RN(reg) | inst_bits);
  562. case SLJIT_OR:
  563. case SLJIT_XOR:
  564. inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
  565. if (!inst_bits)
  566. break;
  567. if (op == SLJIT_OR)
  568. inst_bits |= ORRI;
  569. else
  570. inst_bits |= EORI;
  571. FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg)));
  572. goto set_flags;
  573. case SLJIT_SHL:
  574. if (flags & ARG1_IMM)
  575. break;
  576. if (flags & INT_OP) {
  577. imm &= 0x1f;
  578. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | ((-imm & 0x1f) << 16) | ((31 - imm) << 10)));
  579. }
  580. else {
  581. imm &= 0x3f;
  582. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | ((-imm & 0x3f) << 16) | ((63 - imm) << 10)));
  583. }
  584. goto set_flags;
  585. case SLJIT_LSHR:
  586. case SLJIT_ASHR:
  587. if (flags & ARG1_IMM)
  588. break;
  589. if (op == SLJIT_ASHR)
  590. inv_bits |= 1 << 30;
  591. if (flags & INT_OP) {
  592. imm &= 0x1f;
  593. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (imm << 16) | (31 << 10)));
  594. }
  595. else {
  596. imm &= 0x3f;
  597. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | (imm << 16) | (63 << 10)));
  598. }
  599. goto set_flags;
  600. default:
  601. SLJIT_UNREACHABLE();
  602. break;
  603. }
  604. if (flags & ARG2_IMM) {
  605. if (arg2 == 0)
  606. arg2 = TMP_ZERO;
  607. else {
  608. FAIL_IF(load_immediate(compiler, TMP_REG2, arg2));
  609. arg2 = TMP_REG2;
  610. }
  611. }
  612. else {
  613. if (arg1 == 0)
  614. arg1 = TMP_ZERO;
  615. else {
  616. FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
  617. arg1 = TMP_REG1;
  618. }
  619. }
  620. }
  621. /* Both arguments are registers. */
  622. switch (op) {
  623. case SLJIT_MOV:
  624. case SLJIT_MOV_P:
  625. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  626. if (dst == arg2)
  627. return SLJIT_SUCCESS;
  628. return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  629. case SLJIT_MOV_U8:
  630. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  631. return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (7 << 10));
  632. case SLJIT_MOV_S8:
  633. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  634. if (!(flags & INT_OP))
  635. inv_bits |= 1 << 22;
  636. return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10));
  637. case SLJIT_MOV_U16:
  638. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  639. return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (15 << 10));
  640. case SLJIT_MOV_S16:
  641. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  642. if (!(flags & INT_OP))
  643. inv_bits |= 1 << 22;
  644. return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10));
  645. case SLJIT_MOV_U32:
  646. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  647. if ((flags & INT_OP) && dst == arg2)
  648. return SLJIT_SUCCESS;
  649. return push_inst(compiler, (ORR ^ W_OP) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  650. case SLJIT_MOV_S32:
  651. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  652. if ((flags & INT_OP) && dst == arg2)
  653. return SLJIT_SUCCESS;
  654. return push_inst(compiler, SBFM | (1 << 22) | RD(dst) | RN(arg2) | (31 << 10));
  655. case SLJIT_NOT:
  656. SLJIT_ASSERT(arg1 == TMP_REG1);
  657. FAIL_IF(push_inst(compiler, (ORN ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2)));
  658. break; /* Set flags. */
  659. case SLJIT_NEG:
  660. SLJIT_ASSERT(arg1 == TMP_REG1);
  661. if (flags & SET_FLAGS)
  662. inv_bits |= 1 << 29;
  663. return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  664. case SLJIT_CLZ:
  665. SLJIT_ASSERT(arg1 == TMP_REG1);
  666. return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2));
  667. case SLJIT_ADD:
  668. CHECK_FLAGS(1 << 29);
  669. return push_inst(compiler, (ADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  670. case SLJIT_ADDC:
  671. CHECK_FLAGS(1 << 29);
  672. return push_inst(compiler, (ADC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  673. case SLJIT_SUB:
  674. CHECK_FLAGS(1 << 29);
  675. return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  676. case SLJIT_SUBC:
  677. CHECK_FLAGS(1 << 29);
  678. return push_inst(compiler, (SBC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  679. case SLJIT_MUL:
  680. if (!(flags & SET_FLAGS))
  681. return push_inst(compiler, (MADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO));
  682. if (flags & INT_OP) {
  683. FAIL_IF(push_inst(compiler, SMADDL | RD(dst) | RN(arg1) | RM(arg2) | (31 << 10)));
  684. FAIL_IF(push_inst(compiler, ADD | RD(TMP_LR) | RN(TMP_ZERO) | RM(dst) | (2 << 22) | (31 << 10)));
  685. return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10));
  686. }
  687. FAIL_IF(push_inst(compiler, SMULH | RD(TMP_LR) | RN(arg1) | RM(arg2)));
  688. FAIL_IF(push_inst(compiler, MADD | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO)));
  689. return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10));
  690. case SLJIT_AND:
  691. CHECK_FLAGS(3 << 29);
  692. return push_inst(compiler, (AND ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  693. case SLJIT_OR:
  694. FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  695. break; /* Set flags. */
  696. case SLJIT_XOR:
  697. FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  698. break; /* Set flags. */
  699. case SLJIT_SHL:
  700. FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  701. break; /* Set flags. */
  702. case SLJIT_LSHR:
  703. FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  704. break; /* Set flags. */
  705. case SLJIT_ASHR:
  706. FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  707. break; /* Set flags. */
  708. default:
  709. SLJIT_UNREACHABLE();
  710. return SLJIT_SUCCESS;
  711. }
  712. set_flags:
  713. if (flags & SET_FLAGS)
  714. return push_inst(compiler, (SUBS ^ inv_bits) | RD(TMP_ZERO) | RN(dst) | RM(TMP_ZERO));
  715. return SLJIT_SUCCESS;
  716. }
  717. #define STORE 0x10
  718. #define SIGNED 0x20
  719. #define BYTE_SIZE 0x0
  720. #define HALF_SIZE 0x1
  721. #define INT_SIZE 0x2
  722. #define WORD_SIZE 0x3
  723. #define MEM_SIZE_SHIFT(flags) ((flags) & 0x3)
/* Emits a single load or store between register REG and the memory operand
   ARG/ARGW. The access size and signedness come from FLAGS (BYTE/HALF/INT/
   WORD_SIZE, SIGNED, STORE). TMP_REG may be clobbered to materialize an
   address that does not fit any AArch64 addressing mode. */
static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
	sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
	sljit_u32 shift = MEM_SIZE_SHIFT(flags);
	/* Size bits of the load/store encoding (bits 31:30). */
	sljit_u32 type = (shift << 30);

	if (!(flags & STORE))
		type |= (flags & SIGNED) ? 0x00800000 : 0x00400000; /* opc: sign/zero extending load */

	SLJIT_ASSERT(arg & SLJIT_MEM);

	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
		/* [base + index << n] form: only a shift of 0 or of the access
		   size is directly encodable. */
		argw &= 0x3;

		if (argw == 0 || argw == shift)
			return push_inst(compiler, STRB | type | RT(reg)
				| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));

		/* Otherwise fold base + (index << argw) into tmp_reg first. */
		FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
		return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg));
	}

	arg &= REG_MASK;

	if (arg == SLJIT_UNUSED) {
		/* Absolute address: load everything above the scaled 12 bit
		   immediate range, keep the rest as offset. */
		FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~(0xfff << shift)));

		argw = (argw >> shift) & 0xfff;

		return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
	}

	if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
		/* Aligned positive offset: try the scaled unsigned imm12 form. */
		if ((argw >> shift) <= 0xfff) {
			return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | (argw << (10 - shift)));
		}

		if (argw <= 0xffffff) {
			/* ADD with the imm12-shifted-by-12 form handles the high
			   bits; the remaining low 12 bits fit the offset field. */
			FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | ((argw >> 12) << 10)));

			argw = ((argw & 0xfff) >> shift);
			return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
		}
	}

	/* Unscaled 9 bit signed offset (STUR/LDUR style encoding). */
	if (argw <= 255 && argw >= -256)
		return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12));

	/* Fallback: build the full offset in tmp_reg, use register addressing. */
	FAIL_IF(load_immediate(compiler, tmp_reg, argw));

	return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg));
}
  761. /* --------------------------------------------------------------------- */
  762. /* Entry, exit */
  763. /* --------------------------------------------------------------------- */
/* Emits the function prologue: pushes FP/LR, stores the callee-saved
   registers in pairs above them, sets up the frame pointer, moves the
   incoming arguments into the saved registers and allocates local_size
   bytes of locals. On Windows the stack is grown page by page so every
   guard page is touched (stack probing). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 args, i, tmp, offs, prev, saved_regs_size;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	/* Register save area: FP/LR pair plus the saved registers, padded
	   to a 16 byte multiple. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
	if (saved_regs_size & 0x8)
		saved_regs_size += sizeof(sljit_sw);

	local_size = (local_size + 15) & ~0xf;
	compiler->local_size = local_size + saved_regs_size;

	/* STP x29, x30, [sp, #-saved_regs_size]! (pre-indexed; imm7 field at
	   bit 15, scaled by 8). */
	FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
		| RN(SLJIT_SP) | ((-(saved_regs_size >> 3) & 0x7f) << 15)));

#ifdef _WIN32
	/* Keep the probing cursor in TMP_REG1; SP itself is lowered only at
	   the end, after every page has been touched. */
	if (local_size >= 4096)
		FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
	else if (local_size > 256)
		FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (local_size << 10)));
#endif

	tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;

	/* Store saved registers pairwise with STP, starting at offset 16
	   (just above the FP/LR pair); offs is the pre-scaled imm7 field. */
	prev = -1;
	offs = 2 << 15;
	for (i = SLJIT_S0; i >= tmp; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	/* Odd register count: one leftover single-register store. */
	if (prev != -1)
		FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));

	/* Frame pointer points at the pushed FP/LR pair. */
	FAIL_IF(push_inst(compiler, ADDI | RD(TMP_FP) | RN(SLJIT_SP) | (0 << 10)));

	/* Move the register-passed arguments into their saved registers
	   (MOV encoded as ORR with the zero register). */
	args = get_arg_count(arg_types);

	if (args >= 1)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0) | RN(TMP_ZERO) | RM(SLJIT_R0)));
	if (args >= 2)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S1) | RN(TMP_ZERO) | RM(SLJIT_R1)));
	if (args >= 3)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S2) | RN(TMP_ZERO) | RM(SLJIT_R2)));

#ifdef _WIN32
	if (local_size >= 4096) {
		if (local_size < 4 * 4096) {
			/* No need for a loop. */
			if (local_size >= 2 * 4096) {
				FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
				FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
				local_size -= 4096;
			}

			if (local_size >= 2 * 4096) {
				FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
				FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
				local_size -= 4096;
			}

			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
			local_size -= 4096;
		}
		else {
			/* Probe loop: TMP_REG2 counts the remaining whole pages;
			   the backward B_CC (-3 instructions) repeats until zero. */
			FAIL_IF(push_inst(compiler, MOVZ | RD(TMP_REG2) | (((local_size >> 12) - 1) << 5)));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
			FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
			FAIL_IF(push_inst(compiler, SUBI | (1 << 29) | RD(TMP_REG2) | RN(TMP_REG2) | (1 << 10)));
			FAIL_IF(push_inst(compiler, B_CC | ((((sljit_ins) -3) & 0x7ffff) << 5) | 0x1 /* not-equal */));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));

			local_size &= 0xfff;
		}

		/* Sub-page remainder, then SP = probing cursor. */
		if (local_size > 256) {
			FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (local_size << 10)));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
		}
		else if (local_size > 0)
			FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(TMP_REG1) | ((-local_size & 0x1ff) << 12)));

		FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
	}
	else if (local_size > 256) {
		FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
		FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
	}
	else if (local_size > 0)
		/* Probe and allocate with a single pre-indexed load. */
		FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(SLJIT_SP) | ((-local_size & 0x1ff) << 12)));

#else /* !_WIN32 */

	/* The local_size does not include saved registers size. */
	if (local_size > 0xfff) {
		/* SUB with the imm12-shifted-by-12 form for the high bits. */
		FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
		local_size &= 0xfff;
	}
	if (local_size != 0)
		FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));

#endif /* _WIN32 */

	return SLJIT_SUCCESS;
}
  867. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
  868. sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
  869. sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
  870. {
  871. sljit_s32 saved_regs_size;
  872. CHECK_ERROR();
  873. CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
  874. set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
  875. saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
  876. if (saved_regs_size & 0x8)
  877. saved_regs_size += sizeof(sljit_sw);
  878. compiler->local_size = saved_regs_size + ((local_size + 15) & ~0xf);
  879. return SLJIT_SUCCESS;
  880. }
/* Emits the function epilogue: optional return-value move, restores FP/LR
   and the saved registers, releases the frame and returns. The layout must
   mirror sljit_emit_enter exactly. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 local_size;
	sljit_s32 i, tmp, offs, prev, saved_regs_size;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));

	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

	/* Same padding rule as in sljit_emit_enter. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 2);
	if (saved_regs_size & 0x8)
		saved_regs_size += sizeof(sljit_sw);

	local_size = compiler->local_size - saved_regs_size;

	/* Load LR as early as possible. */
	if (local_size == 0)
		FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
	else if (local_size < 63 * sizeof(sljit_sw)) {
		/* Pre-indexed LDP pops the locals in the same instruction
		   (imm7 field at bit 15, scaled by 8 -> the << (15 - 3)). */
		FAIL_IF(push_inst(compiler, LDP_PRE | RT(TMP_FP) | RT2(TMP_LR)
			| RN(SLJIT_SP) | (local_size << (15 - 3))));
	}
	else {
		/* Locals too large for imm7: release them explicitly first. */
		if (local_size > 0xfff) {
			FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
			local_size &= 0xfff;
		}
		if (local_size)
			FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));

		FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
	}

	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;

	/* Restore the saved registers pairwise, starting at offset 16
	   (above the FP/LR pair); offs is the pre-scaled imm7 field. */
	prev = -1;
	offs = 2 << 15;
	for (i = SLJIT_S0; i >= tmp; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	/* Odd register count: one leftover single-register load. */
	if (prev != -1)
		FAIL_IF(push_inst(compiler, LDRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));

	/* These two can be executed in parallel. */
	FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (saved_regs_size << 10)));
	return push_inst(compiler, RET | RN(TMP_LR));
}
  935. /* --------------------------------------------------------------------- */
  936. /* Operators */
  937. /* --------------------------------------------------------------------- */
/* Emits a zero-operand operation (breakpoint, nop, or one of the fixed
   R0/R1 multiply / divide helpers). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
	/* XOR-ing W_OP into an opcode selects its 32 bit register form. */
	sljit_ins inv_bits = (op & SLJIT_I32_OP) ? W_OP : 0;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op0(compiler, op));
	op = GET_OPCODE(op);

	switch (op) {
	case SLJIT_BREAKPOINT:
		return push_inst(compiler, BRK);
	case SLJIT_NOP:
		return push_inst(compiler, NOP);
	case SLJIT_LMUL_UW:
	case SLJIT_LMUL_SW:
		/* 128 bit product of R0 * R1: low half via MADD into R0, high
		   half via U/SMULH into R1. R0 is copied to TMP_REG1 first
		   because the MADD overwrites it. */
		FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
		FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
		return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
	case SLJIT_DIVMOD_UW:
	case SLJIT_DIVMOD_SW:
		/* Quotient in R0, remainder in R1. AArch64 has no remainder
		   instruction: remainder = dividend - quotient * divisor. */
		FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
		FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
		FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
		return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
	case SLJIT_DIV_UW:
	case SLJIT_DIV_SW:
		/* Quotient only: single UDIV/SDIV. */
		return push_inst(compiler, ((op == SLJIT_DIV_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
	}

	return SLJIT_SUCCESS;
}
/* Emits a single-operand operation (moves with optional width conversion,
   NOT/NEG/CLZ) between arbitrary operands (register, memory, immediate). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r, flags, mem_flags;
	sljit_s32 op_flags = GET_ALL_FLAGS(op);

	CHECK_ERROR();
	CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) {
		if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) {
			/* A MOV from memory into an unused destination is emitted
			   as a prefetch; the "destination register" number selects
			   the prefetch operation, hence the fixed register ids
			   whose hardware numbers are pinned by this assert. */
			SLJIT_ASSERT(reg_map[1] == 0 && reg_map[3] == 2 && reg_map[5] == 4);

			if (op >= SLJIT_MOV_U8 && op <= SLJIT_MOV_S8)
				dst = 5;
			else if (op >= SLJIT_MOV_U16 && op <= SLJIT_MOV_S16)
				dst = 3;
			else
				dst = 1;

			/* Signed word sized load is the prefetch instruction. */
			return emit_op_mem(compiler, WORD_SIZE | SIGNED, dst, src, srcw, TMP_REG1);
		}
		return SLJIT_SUCCESS;
	}

	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;

	op = GET_OPCODE(op);
	if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
		/* Both operands are registers. */
		if (dst_r != TMP_REG1 && FAST_IS_REG(src))
			return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? INT_OP : 0), dst_r, TMP_REG1, src);

		/* Pick the memory access size and normalize immediates to the
		   width/signedness of the move. */
		switch (op) {
		case SLJIT_MOV:
		case SLJIT_MOV_P:
			mem_flags = WORD_SIZE;
			break;
		case SLJIT_MOV_U8:
			mem_flags = BYTE_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u8)srcw;
			break;
		case SLJIT_MOV_S8:
			mem_flags = BYTE_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s8)srcw;
			break;
		case SLJIT_MOV_U16:
			mem_flags = HALF_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u16)srcw;
			break;
		case SLJIT_MOV_S16:
			mem_flags = HALF_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s16)srcw;
			break;
		case SLJIT_MOV_U32:
			mem_flags = INT_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u32)srcw;
			break;
		case SLJIT_MOV_S32:
			mem_flags = INT_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s32)srcw;
			break;
		default:
			SLJIT_UNREACHABLE();
			mem_flags = 0;
			break;
		}

		if (src & SLJIT_IMM)
			FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw));
		else if (!(src & SLJIT_MEM))
			/* Register source going to memory: store it directly. */
			dst_r = src;
		else
			FAIL_IF(emit_op_mem(compiler, mem_flags, dst_r, src, srcw, TMP_REG1));

		if (dst & SLJIT_MEM)
			return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
		return SLJIT_SUCCESS;
	}

	/* Non-move unary operations (NOT/NEG/CLZ). */
	flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op_flags & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	if (dst == SLJIT_UNUSED)
		flags |= UNUSED_RETURN;

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src, srcw, TMP_REG2));
		src = TMP_REG2;
	}

	/* NOTE(review): return value ignored — failures are presumably latched
	   in compiler->error by push_inst; confirm against the sljit core. */
	emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, src);

	if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
		return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
/* Emits a two-operand operation between arbitrary operands; memory operands
   are loaded into the temporaries, then the work is delegated to
   emit_op_imm. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, flags, mem_flags;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	/* No destination and no flag output: nothing observable to emit. */
	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
		return SLJIT_SUCCESS;

	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
	flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	if (dst == SLJIT_UNUSED)
		flags |= UNUSED_RETURN;

	if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, src1, src1w, TMP_REG1));
		src1 = TMP_REG1;
	}

	if (src2 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src2, src2w, TMP_REG2));
		src2 = TMP_REG2;
	}

	/* emit_op_imm receives each operand in the "w" slot: either the
	   immediate value, or the register number (flagged by ARG?_IMM). */
	if (src1 & SLJIT_IMM)
		flags |= ARG1_IMM;
	else
		src1w = src1;

	if (src2 & SLJIT_IMM)
		flags |= ARG2_IMM;
	else
		src2w = src2;

	/* NOTE(review): return value ignored — failures are presumably latched
	   in compiler->error by push_inst; confirm against the sljit core. */
	emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w);

	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
  1106. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
  1107. {
  1108. CHECK_REG_INDEX(check_sljit_get_register_index(reg));
  1109. return reg_map[reg];
  1110. }
  1111. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
  1112. {
  1113. CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
  1114. return freg_map[reg];
  1115. }
  1116. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
  1117. void *instruction, sljit_s32 size)
  1118. {
  1119. CHECK_ERROR();
  1120. CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
  1121. return push_inst(compiler, *(sljit_ins*)instruction);
  1122. }
  1123. /* --------------------------------------------------------------------- */
  1124. /* Floating point operators */
  1125. /* --------------------------------------------------------------------- */
/* Floating point counterpart of emit_op_mem: load/store between FP register
   REG and memory operand ARG/ARGW. TMP_REG1 is the fixed scratch for
   addresses that need materializing; FP loads have no signed variants. */
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
	sljit_u32 shift = MEM_SIZE_SHIFT(flags);
	/* Size bits of the load/store encoding (bits 31:30). */
	sljit_ins type = (shift << 30);

	SLJIT_ASSERT(arg & SLJIT_MEM);

	if (!(flags & STORE))
		type |= 0x00400000; /* opc: load */

	if (arg & OFFS_REG_MASK) {
		/* [base + index << n]: only shift 0 or the access size encodes. */
		argw &= 3;

		if (argw == 0 || argw == shift)
			return push_inst(compiler, STR_FR | type | VT(reg)
				| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));

		/* Otherwise fold base + (index << argw) into TMP_REG1 first. */
		FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
		return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1));
	}

	arg &= REG_MASK;

	if (arg == SLJIT_UNUSED) {
		/* Absolute address: high part via load_immediate, low part as
		   scaled imm12 offset. */
		FAIL_IF(load_immediate(compiler, TMP_REG1, argw & ~(0xfff << shift)));

		argw = (argw >> shift) & 0xfff;

		return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
	}

	if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
		/* Aligned positive offset: scaled unsigned imm12 form. */
		if ((argw >> shift) <= 0xfff)
			return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | (argw << (10 - shift)));

		if (argw <= 0xffffff) {
			/* ADD (imm12 shifted by 12) + remaining low offset. */
			FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | ((argw >> 12) << 10)));

			argw = ((argw & 0xfff) >> shift);
			return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
		}
	}

	/* Unscaled 9 bit signed offset (STUR/LDUR style encoding). */
	if (argw <= 255 && argw >= -256)
		return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12));

	/* Fallback: full offset in TMP_REG1, register addressing. */
	FAIL_IF(load_immediate(compiler, TMP_REG1, argw));
	return push_inst(compiler, STR_FR | type | VT(reg) | RN(arg) | RM(TMP_REG1));
}
/* Converts f64/f32 to a signed integer (FCVTZS: round toward zero). */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
	/* Bit 22: single precision source; W_OP: 32 bit integer result. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64)
		inv_bits |= W_OP;

	if (src & SLJIT_MEM) {
		/* NOTE(review): return value ignored — presumably latched in
		   compiler->error by push_inst; confirm against the sljit core. */
		emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
		src = TMP_FREG1;
	}

	FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src)));

	if (dst & SLJIT_MEM)
		/* dst is memory, so dst_r was TMP_REG1 above. */
		return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
  1178. static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
  1179. sljit_s32 dst, sljit_sw dstw,
  1180. sljit_s32 src, sljit_sw srcw)
  1181. {
  1182. sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
  1183. sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
  1184. if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
  1185. inv_bits |= W_OP;
  1186. if (src & SLJIT_MEM) {
  1187. emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1);
  1188. src = TMP_REG1;
  1189. } else if (src & SLJIT_IMM) {
  1190. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1191. if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
  1192. srcw = (sljit_s32)srcw;
  1193. #endif
  1194. FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
  1195. src = TMP_REG1;
  1196. }
  1197. FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src)));
  1198. if (dst & SLJIT_MEM)
  1199. return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
  1200. return SLJIT_SUCCESS;
  1201. }
/* Emits an FCMP of src1 against src2, setting the condition flags. */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	/* Bit 22 toggles the single/double precision form. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (src1 & SLJIT_MEM) {
		/* NOTE(review): return values ignored here — presumably latched
		   in compiler->error by push_inst; confirm against the core. */
		emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
		src2 = TMP_FREG2;
	}

	return push_inst(compiler, (FCMP ^ inv_bits) | VN(src1) | VM(src2));
}
/* Emits a single-operand floating point operation (move, negate, abs,
   precision conversion). Integer<->float conversions and compares are
   dispatched to their helpers by SELECT_FOP1_OPERATION_WITH_CHECKS. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	sljit_ins inv_bits;

	CHECK_ERROR();

	SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x1) == WORD_SIZE, must_be_one_bit_difference);
	SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);

	/* Bit 22 toggles the single/double precision form. */
	inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src & SLJIT_MEM) {
		/* For F64<-F32 the source size is the opposite of the result
		   size; the compile assert above makes the ^0x1 flip valid.
		   NOTE(review): return value ignored — presumably latched in
		   compiler->error by push_inst; confirm against the core. */
		emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw);
		src = dst_r;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV_F64:
		if (src != dst_r) {
			if (dst_r != TMP_FREG1)
				FAIL_IF(push_inst(compiler, (FMOV ^ inv_bits) | VD(dst_r) | VN(src)));
			else
				/* Result only has to reach memory: store src itself. */
				dst_r = src;
		}
		break;
	case SLJIT_NEG_F64:
		FAIL_IF(push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(src)));
		break;
	case SLJIT_ABS_F64:
		FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src)));
		break;
	case SLJIT_CONV_F64_FROM_F32:
		/* FCVT; the chosen bit selects the source/destination
		   precision pair. */
		FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
		break;
	}

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, mem_flags | STORE, dst_r, dst, dstw);
	return SLJIT_SUCCESS;
}
/* Emits a two-operand floating point arithmetic operation (add, sub, mul,
   div); memory operands are loaded into the FP temporaries first. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	/* Bit 22 toggles the single/double precision form. */
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src1 & SLJIT_MEM) {
		/* NOTE(review): return values ignored here — presumably latched
		   in compiler->error by push_inst; confirm against the core. */
		emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
		src2 = TMP_FREG2;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_ADD_F64:
		FAIL_IF(push_inst(compiler, (FADD ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_SUB_F64:
		FAIL_IF(push_inst(compiler, (FSUB ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_MUL_F64:
		FAIL_IF(push_inst(compiler, (FMUL ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_DIV_F64:
		FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	}

	if (!(dst & SLJIT_MEM))
		return SLJIT_SUCCESS;
	/* dst is memory here, so dst_r was TMP_FREG1 above. */
	return emit_fop_mem(compiler, mem_flags | STORE, TMP_FREG1, dst, dstw);
}
  1295. /* --------------------------------------------------------------------- */
  1296. /* Other instructions */
  1297. /* --------------------------------------------------------------------- */
  1298. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  1299. {
  1300. CHECK_ERROR();
  1301. CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
  1302. ADJUST_LOCAL_OFFSET(dst, dstw);
  1303. if (FAST_IS_REG(dst))
  1304. return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(TMP_LR));
  1305. /* Memory. */
  1306. return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw, TMP_REG1);
  1307. }
  1308. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
  1309. {
  1310. CHECK_ERROR();
  1311. CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
  1312. ADJUST_LOCAL_OFFSET(src, srcw);
  1313. if (FAST_IS_REG(src))
  1314. FAIL_IF(push_inst(compiler, ORR | RD(TMP_LR) | RN(TMP_ZERO) | RM(src)));
  1315. else
  1316. FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw, TMP_REG1));
  1317. return push_inst(compiler, RET | RN(TMP_LR));
  1318. }
  1319. /* --------------------------------------------------------------------- */
  1320. /* Conditional instructions */
  1321. /* --------------------------------------------------------------------- */
/* Maps an SLJIT condition type to the AArch64 condition code of its
   NEGATION (callers branch on the inverted condition, e.g. to skip a
   far-jump sequence). */
static sljit_uw get_cc(sljit_s32 type)
{
	switch (type) {
	case SLJIT_EQUAL:
	case SLJIT_MUL_NOT_OVERFLOW:
	case SLJIT_EQUAL_F64:
		return 0x1; /* NE */

	case SLJIT_NOT_EQUAL:
	case SLJIT_MUL_OVERFLOW:
	case SLJIT_NOT_EQUAL_F64:
		return 0x0; /* EQ */

	case SLJIT_LESS:
	case SLJIT_LESS_F64:
		return 0x2; /* CS (unsigned >=) */

	case SLJIT_GREATER_EQUAL:
	case SLJIT_GREATER_EQUAL_F64:
		return 0x3; /* CC (unsigned <) */

	case SLJIT_GREATER:
	case SLJIT_GREATER_F64:
		return 0x9; /* LS (unsigned <=) */

	case SLJIT_LESS_EQUAL:
	case SLJIT_LESS_EQUAL_F64:
		return 0x8; /* HI (unsigned >) */

	case SLJIT_SIG_LESS:
		return 0xa; /* GE */

	case SLJIT_SIG_GREATER_EQUAL:
		return 0xb; /* LT */

	case SLJIT_SIG_GREATER:
		return 0xd; /* LE */

	case SLJIT_SIG_LESS_EQUAL:
		return 0xc; /* GT */

	case SLJIT_OVERFLOW:
	case SLJIT_UNORDERED_F64:
		return 0x7; /* VC */

	case SLJIT_NOT_OVERFLOW:
	case SLJIT_ORDERED_F64:
		return 0x6; /* VS */

	default:
		SLJIT_UNREACHABLE();
		return 0xe; /* AL */
	}
}
  1364. SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
  1365. {
  1366. struct sljit_label *label;
  1367. CHECK_ERROR_PTR();
  1368. CHECK_PTR(check_sljit_emit_label(compiler));
  1369. if (compiler->last_label && compiler->last_label->size == compiler->size)
  1370. return compiler->last_label;
  1371. label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
  1372. PTR_FAIL_IF(!label);
  1373. set_label(label, compiler);
  1374. return label;
  1375. }
/* Emits a (possibly conditional) jump to a yet-unknown target: a patchable
   immediate load into TMP_REG1 followed by BR/BLR. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
	struct sljit_jump *jump;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_jump(compiler, type));

	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	type &= 0xff;

	if (type < SLJIT_JUMP) {
		jump->flags |= IS_COND;
		/* Branch on the INVERTED condition (get_cc returns the negated
		   code) over the jump sequence below; imm19 offset 6 skips the
		   immediate load (presumably 4 MOVZ/MOVK words — confirm in
		   emit_imm64_const) plus the BR. */
		PTR_FAIL_IF(push_inst(compiler, B_CC | (6 << 5) | get_cc(type)));
	}
	else if (type >= SLJIT_FAST_CALL)
		jump->flags |= IS_BL;

	/* Placeholder target address; patched once the target is known. */
	PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1)));
	return jump;
}
  1396. SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
  1397. sljit_s32 arg_types)
  1398. {
  1399. CHECK_ERROR_PTR();
  1400. CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
  1401. #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
  1402. || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
  1403. compiler->skip_checks = 1;
  1404. #endif
  1405. return sljit_emit_jump(compiler, type);
  1406. }
/* Emits a compare-against-zero jump using CBZ/CBNZ (single-instruction
   compare-and-branch, no separate CMP needed). Only equality and inequality
   with zero can be expressed this way, hence the assert below. */
static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump;
	/* W_OP selects the 32 bit (Wn) form of the instruction. */
	sljit_ins inv_bits = (type & SLJIT_I32_OP) ? W_OP : 0;

	SLJIT_ASSERT((type & 0xff) == SLJIT_EQUAL || (type & 0xff) == SLJIT_NOT_EQUAL);
	ADJUST_LOCAL_OFFSET(src, srcw);

	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	jump->flags |= IS_CBZ | IS_COND;

	/* Move a memory or immediate source into a register first. */
	if (src & SLJIT_MEM) {
		PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
		src = TMP_REG1;
	}
	else if (src & SLJIT_IMM) {
		PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
		src = TMP_REG1;
	}

	SLJIT_ASSERT(FAST_IS_REG(src));

	/* Bit 24 turns CBZ into CBNZ: for SLJIT_EQUAL the skip-branch below
	   must be taken when the value is *non*-zero. */
	if ((type & 0xff) == SLJIT_EQUAL)
		inv_bits |= 1 << 24;

	/* Branch 6 instructions forward (imm19 at bit 5), over the
	   mov-constant/BR sequence emitted below. */
	PTR_FAIL_IF(push_inst(compiler, (CBZ ^ inv_bits) | (6 << 5) | RT(src)));
	/* Placeholder target; patched later (see sljit_set_jump_addr). */
	PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, BR | RN(TMP_REG1)));
	return jump;
}
/* Emits an indirect jump or fast call to a register, memory operand, or
   absolute immediate address. Immediate targets are recorded as patchable
   jumps so the generator can shorten them when possible. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump;

	CHECK_ERROR();
	CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	if (!(src & SLJIT_IMM)) {
		if (src & SLJIT_MEM) {
			/* Load the target address into TMP_REG1 first. */
			FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
			src = TMP_REG1;
		}
		/* BLR records the return address (fast call); BR is a plain branch. */
		return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(src));
	}

	/* These jumps are converted to jump/call instructions when possible. */
	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	FAIL_IF(!jump);
	set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
	jump->u.target = srcw;

	/* Placeholder constant (0); jump->addr marks the patch position. */
	FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1));
}
  1457. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
  1458. sljit_s32 arg_types,
  1459. sljit_s32 src, sljit_sw srcw)
  1460. {
  1461. CHECK_ERROR();
  1462. CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
  1463. #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
  1464. || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
  1465. compiler->skip_checks = 1;
  1466. #endif
  1467. return sljit_emit_ijump(compiler, type, src, srcw);
  1468. }
  1469. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
  1470. sljit_s32 dst, sljit_sw dstw,
  1471. sljit_s32 type)
  1472. {
  1473. sljit_s32 dst_r, src_r, flags, mem_flags;
  1474. sljit_ins cc;
  1475. CHECK_ERROR();
  1476. CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
  1477. ADJUST_LOCAL_OFFSET(dst, dstw);
  1478. cc = get_cc(type & 0xff);
  1479. dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  1480. if (GET_OPCODE(op) < SLJIT_ADD) {
  1481. FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(dst_r) | RN(TMP_ZERO) | RM(TMP_ZERO)));
  1482. if (dst_r == TMP_REG1) {
  1483. mem_flags = (GET_OPCODE(op) == SLJIT_MOV ? WORD_SIZE : INT_SIZE) | STORE;
  1484. return emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG2);
  1485. }
  1486. return SLJIT_SUCCESS;
  1487. }
  1488. flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
  1489. mem_flags = WORD_SIZE;
  1490. if (op & SLJIT_I32_OP) {
  1491. flags |= INT_OP;
  1492. mem_flags = INT_SIZE;
  1493. }
  1494. src_r = dst;
  1495. if (dst & SLJIT_MEM) {
  1496. FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG1));
  1497. src_r = TMP_REG1;
  1498. }
  1499. FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(TMP_ZERO)));
  1500. emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src_r, TMP_REG2);
  1501. if (dst & SLJIT_MEM)
  1502. return emit_op_mem(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, TMP_REG2);
  1503. return SLJIT_SUCCESS;
  1504. }
  1505. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
  1506. sljit_s32 dst_reg,
  1507. sljit_s32 src, sljit_sw srcw)
  1508. {
  1509. sljit_ins inv_bits = (dst_reg & SLJIT_I32_OP) ? W_OP : 0;
  1510. sljit_ins cc;
  1511. CHECK_ERROR();
  1512. CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
  1513. if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
  1514. if (dst_reg & SLJIT_I32_OP)
  1515. srcw = (sljit_s32)srcw;
  1516. FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
  1517. src = TMP_REG1;
  1518. srcw = 0;
  1519. }
  1520. cc = get_cc(type & 0xff);
  1521. dst_reg &= ~SLJIT_I32_OP;
  1522. return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
  1523. }
  1524. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
  1525. sljit_s32 reg,
  1526. sljit_s32 mem, sljit_sw memw)
  1527. {
  1528. sljit_u32 sign = 0, inst;
  1529. CHECK_ERROR();
  1530. CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
  1531. if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -256))
  1532. return SLJIT_ERR_UNSUPPORTED;
  1533. if (type & SLJIT_MEM_SUPP)
  1534. return SLJIT_SUCCESS;
  1535. switch (type & 0xff) {
  1536. case SLJIT_MOV:
  1537. case SLJIT_MOV_P:
  1538. inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
  1539. break;
  1540. case SLJIT_MOV_S8:
  1541. sign = 1;
  1542. case SLJIT_MOV_U8:
  1543. inst = STURBI | (MEM_SIZE_SHIFT(BYTE_SIZE) << 30) | 0x400;
  1544. break;
  1545. case SLJIT_MOV_S16:
  1546. sign = 1;
  1547. case SLJIT_MOV_U16:
  1548. inst = STURBI | (MEM_SIZE_SHIFT(HALF_SIZE) << 30) | 0x400;
  1549. break;
  1550. case SLJIT_MOV_S32:
  1551. sign = 1;
  1552. case SLJIT_MOV_U32:
  1553. inst = STURBI | (MEM_SIZE_SHIFT(INT_SIZE) << 30) | 0x400;
  1554. break;
  1555. default:
  1556. SLJIT_UNREACHABLE();
  1557. inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
  1558. break;
  1559. }
  1560. if (!(type & SLJIT_MEM_STORE))
  1561. inst |= sign ? 0x00800000 : 0x00400000;
  1562. if (type & SLJIT_MEM_PRE)
  1563. inst |= 0x800;
  1564. return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
  1565. }
  1566. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
  1567. sljit_s32 freg,
  1568. sljit_s32 mem, sljit_sw memw)
  1569. {
  1570. sljit_u32 inst;
  1571. CHECK_ERROR();
  1572. CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
  1573. if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -256))
  1574. return SLJIT_ERR_UNSUPPORTED;
  1575. if (type & SLJIT_MEM_SUPP)
  1576. return SLJIT_SUCCESS;
  1577. inst = STUR_FI | 0x80000400;
  1578. if (!(type & SLJIT_F32_OP))
  1579. inst |= 0x40000000;
  1580. if (!(type & SLJIT_MEM_STORE))
  1581. inst |= 0x00400000;
  1582. if (type & SLJIT_MEM_PRE)
  1583. inst |= 0x800;
  1584. return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
  1585. }
/* Computes the address of a stack local (SLJIT_SP + offset) into dst. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
	sljit_s32 dst_reg;
	sljit_ins ins;

	CHECK_ERROR();
	CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));

	SLJIT_ASSERT (SLJIT_LOCALS_OFFSET_BASE == 0);

	dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;

	/* Offsets up to 24 bits fit in at most two ADD/SUB immediates
	   (12 bit immediate, optionally shifted left by 12). */
	if (offset <= 0xffffff && offset >= -0xffffff) {
		/* Negative offsets are handled by switching to SUB. */
		ins = ADDI;
		if (offset < 0) {
			offset = -offset;
			ins = SUBI;
		}

		if (offset <= 0xfff)
			FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (offset << 10)));
		else {
			/* (1 << 22) selects the shifted-by-12 immediate form; the
			   high 12 bits go first, the low 12 bits (if any) follow. */
			FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | ((offset & 0xfff000) >> (12 - 10)) | (1 << 22)));

			offset &= 0xfff;
			if (offset != 0)
				FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(dst_reg) | (offset << 10)));
		}
	}
	else {
		/* Large offset: materialize it in dst_reg, then add SP to it. */
		FAIL_IF(load_immediate (compiler, dst_reg, offset));
		/* Add extended register form. */
		FAIL_IF(push_inst(compiler, ADDE | (0x3 << 13) | RD(dst_reg) | RN(SLJIT_SP) | RM(dst_reg)));
	}

	if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
		return emit_op_mem(compiler, WORD_SIZE | STORE, dst_reg, dst, dstw, TMP_REG1);
	return SLJIT_SUCCESS;
}
/* Emits a patchable 64 bit constant load (see sljit_set_const for the
   patching side) and stores the value to dst. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
	struct sljit_const *const_;
	sljit_s32 dst_r;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
	PTR_FAIL_IF(!const_);
	set_const(const_, compiler);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
	/* Full-length mov sequence so any 64 bit value can be patched in later. */
	PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, init_value));

	if (dst & SLJIT_MEM)
		PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
	return const_;
}
/* Rewrites the patchable constant sequence at addr so it loads new_target,
   then flushes the instruction cache. The words are written through addr
   (the writable mapping); the cache flush uses the executable alias
   (addr + executable_offset), covering the 4 patched instruction words. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
	sljit_ins* inst = (sljit_ins*)addr;
	modify_imm64_const(inst, new_target);
	inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
	SLJIT_CACHE_FLUSH(inst, inst + 4);
}
  1641. SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
  1642. {
  1643. sljit_ins* inst = (sljit_ins*)addr;
  1644. modify_imm64_const(inst, new_constant);
  1645. inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
  1646. SLJIT_CACHE_FLUSH(inst, inst + 4);
  1647. }