/* sljitNativeX86_64.c */
  1. /*
  2. * Stack-less Just-In-Time compiler
  3. *
  4. * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without modification, are
  7. * permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright notice, this list of
  10. * conditions and the following disclaimer.
  11. *
  12. * 2. Redistributions in binary form must reproduce the above copyright notice, this list
  13. * of conditions and the following disclaimer in the documentation and/or other materials
  14. * provided with the distribution.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
  17. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  18. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  19. * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  20. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  21. * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  22. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  23. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  24. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. /* x86 64-bit arch dependent functions. */
  27. static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
  28. {
  29. sljit_u8 *inst;
  30. inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw));
  31. FAIL_IF(!inst);
  32. INC_SIZE(2 + sizeof(sljit_sw));
  33. *inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
  34. *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7);
  35. sljit_unaligned_store_sw(inst, imm);
  36. return SLJIT_SUCCESS;
  37. }
/* Emits a "far" jump or call: the target address is loaded into TMP_REG2
   (64 bit mov, or 32 bit zero-extending mov when the target is a known
   constant below 4GB) and control is transferred through the register.
   For conditional types an inverted short conditional jump is emitted
   first to skip the whole sequence. Returns the advanced code_ptr. */
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_s32 type)
{
	/* The short (32 bit) form is only safe when the address is fixed now
	   and can never be rewritten later. */
	int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);

	/* The relative jump below specialized for this case. */
	SLJIT_ASSERT(reg_map[TMP_REG2] >= 8);

	if (type < SLJIT_JUMP) {
		/* Invert type: skip the indirect jump when the inverted condition
		   holds. Subtracting 0x10 turns the near jcc opcode into its
		   short (rel8) form. */
		*code_ptr++ = get_jump_code(type ^ 0x1) - 0x10;
		/* rel8 displacement: mov (6 or 10 bytes) + indirect jmp/call (3 bytes). */
		*code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
	}

	/* mov TMP_REG2, imm32/imm64. */
	*code_ptr++ = short_addr ? REX_B : (REX_W | REX_B);
	*code_ptr++ = MOV_r_i32 | reg_lmap[TMP_REG2];

	jump->addr = (sljit_uw)code_ptr;

	if (jump->flags & JUMP_LABEL)
		jump->flags |= PATCH_MD; /* Address is patched in after code generation. */
	else if (short_addr)
		sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
	else
		sljit_unaligned_store_sw(code_ptr, jump->u.target);

	code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);

	/* jmp/call TMP_REG2 (REX.B because TMP_REG2 maps to r8-r15, see assert). */
	*code_ptr++ = REX_B;
	*code_ptr++ = GROUP_FF;
	*code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2];

	return code_ptr;
}
/* Emits the function prologue: pushes the saved registers, moves the
   incoming ABI argument registers into the S registers, allocates and
   (on Windows) probes the local stack area. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 args, i, tmp, size, saved_register_size;
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	compiler->mode32 = 0;

#ifdef _WIN64
	/* Two/four register slots for parameters plus space for xmm6 register if needed. */
	if (fscratches >= 6 || fsaveds >= 1)
		compiler->locals_offset = 6 * sizeof(sljit_sw);
	else
		compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
#endif

	/* Including the return address saved by the call instruction. */
	saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);

	/* Push the saved S registers; r8-r15 need a REX.B prefix (2 bytes). */
	tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
	for (i = SLJIT_S0; i >= tmp; i--) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		PUSH_REG(reg_lmap[i]);
	}

	/* Push the callee-saved scratch registers. */
	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		PUSH_REG(reg_lmap[i]);
	}

	/* Move the incoming argument registers into the S registers.
	   Each register-to-register mov below is 3 bytes (REX + opcode + mod/rm). */
	args = get_arg_count(arg_types);

	if (args > 0) {
		size = args * 3;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);

		INC_SIZE(size);

#ifndef _WIN64
		/* System V AMD64: the first three arguments arrive in rdi, rsi, rdx. */
		if (args > 0) {
			inst[0] = REX_W;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */;
			inst += 3;
		}
		if (args > 1) {
			inst[0] = REX_W | REX_R;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */;
			inst += 3;
		}
		if (args > 2) {
			inst[0] = REX_W | REX_R;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */;
		}
#else
		/* Windows x64: the first three arguments arrive in rcx, rdx, r8. */
		if (args > 0) {
			inst[0] = REX_W;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */;
			inst += 3;
		}
		if (args > 1) {
			inst[0] = REX_W;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */;
			inst += 3;
		}
		if (args > 2) {
			inst[0] = REX_W | REX_B;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */;
		}
#endif
	}

	/* Round the frame up so rsp stays 16 byte aligned
	   (saved_register_size already includes the return address). */
	local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
	compiler->local_size = local_size;

#ifdef _WIN64
	if (local_size > 0) {
		/* Stack probing: each 4K page of the new stack area is touched
		   before rsp is lowered past it. */
		if (local_size <= 4 * 4096) {
			/* Up to three explicit probe loads. */
			if (local_size > 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096);
			if (local_size > 2 * 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
			if (local_size > 3 * 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
		}
		else {
			/* Probe loop: R0 walks down one page per iteration,
			   TMP_REG1 holds the remaining page count. */
			EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, (local_size - 1) >> 12);

			SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);

			EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_R0), -4096);
			FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
				SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
			FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
				TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, 1));

			inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
			FAIL_IF(!inst);

			INC_SIZE(2);
			inst[0] = JNE_i8;
			/* Branch back to the probe load above; -19 must equal the
			   combined size of the loop body plus this jne. */
			inst[1] = (sljit_s8) -19;
		}

		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
	}
#endif

	/* Allocate the local area: sub rsp, local_size. */
	if (local_size > 0) {
		FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
			SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
	}

#ifdef _WIN64
	/* Save xmm6 register: movaps [rsp + 0x20], xmm6 */
	if (fscratches >= 6 || fsaveds >= 1) {
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
		FAIL_IF(!inst);
		INC_SIZE(5);
		*inst++ = GROUP_0F;
		sljit_unaligned_store_s32(inst, 0x20247429);
	}
#endif

	return SLJIT_SUCCESS;
}
  191. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
  192. sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
  193. sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
  194. {
  195. sljit_s32 saved_register_size;
  196. CHECK_ERROR();
  197. CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
  198. set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
  199. #ifdef _WIN64
  200. /* Two/four register slots for parameters plus space for xmm6 register if needed. */
  201. if (fscratches >= 6 || fsaveds >= 1)
  202. compiler->locals_offset = 6 * sizeof(sljit_sw);
  203. else
  204. compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
  205. #endif
  206. /* Including the return address saved by the call instruction. */
  207. saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
  208. compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
  209. return SLJIT_SUCCESS;
  210. }
/* Emits the function epilogue: moves the return value into place,
   releases the local area, pops the saved registers in reverse push
   order and returns. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 i, tmp, size;
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));

	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

#ifdef _WIN64
	/* Restore xmm6 register: movaps xmm6, [rsp + 0x20] */
	if (compiler->fscratches >= 6 || compiler->fsaveds >= 1) {
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
		FAIL_IF(!inst);
		INC_SIZE(5);
		*inst++ = GROUP_0F;
		sljit_unaligned_store_s32(inst, 0x20247428);
	}
#endif

	/* Release the local area: add rsp, local_size. The 8 bit immediate
	   form (group 83) is used when the size fits into a signed byte. */
	if (compiler->local_size > 0) {
		if (compiler->local_size <= 127) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
			FAIL_IF(!inst);
			INC_SIZE(4);
			*inst++ = REX_W;
			*inst++ = GROUP_BINARY_83;
			*inst++ = MOD_REG | ADD | 4; /* r/m 4 encodes rsp. */
			*inst = compiler->local_size;
		}
		else {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
			FAIL_IF(!inst);
			INC_SIZE(7);
			*inst++ = REX_W;
			*inst++ = GROUP_BINARY_81;
			*inst++ = MOD_REG | ADD | 4;
			sljit_unaligned_store_s32(inst, compiler->local_size);
		}
	}

	/* Pop the callee-saved scratch registers (pushed last in the
	   prologue); r8-r15 need a REX.B prefix. */
	tmp = compiler->scratches;
	for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		POP_REG(reg_lmap[i]);
	}

	/* Pop the saved S registers. */
	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	for (i = tmp; i <= SLJIT_S0; i++) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		POP_REG(reg_lmap[i]);
	}

	inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
	FAIL_IF(!inst);
	INC_SIZE(1);
	RET();
	return SLJIT_SUCCESS;
}
  274. /* --------------------------------------------------------------------- */
  275. /* Operators */
  276. /* --------------------------------------------------------------------- */
  277. static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
  278. {
  279. sljit_u8 *inst;
  280. sljit_s32 length = 1 + (rex ? 1 : 0) + sizeof(sljit_s32);
  281. inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
  282. FAIL_IF(!inst);
  283. INC_SIZE(length);
  284. if (rex)
  285. *inst++ = rex;
  286. *inst++ = opcode;
  287. sljit_unaligned_store_s32(inst, imm);
  288. return SLJIT_SUCCESS;
  289. }
/* Core x86-64 instruction encoder. First computes the instruction length
   (prefixes + REX + opcode + mod/rm + SIB + displacement + immediate),
   reserves the bytes, then fills in everything except the opcode itself.
   Returns a pointer to the opcode byte(s) which the caller must fill in,
   or NULL on failure. The high bits of "size" carry EX86_* encoding
   flags; the low 4 bits are the opcode length in bytes. */
static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size,
	/* The register or immediate operand. */
	sljit_s32 a, sljit_sw imma,
	/* The general operand (not immediate). */
	sljit_s32 b, sljit_sw immb)
{
	sljit_u8 *inst;
	sljit_u8 *buf_ptr;
	sljit_u8 rex = 0;
	sljit_s32 flags = size & ~0xf;
	sljit_s32 inst_size;

	/* The immediate operand must be 32 bit. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
	/* Both cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
	/* Size flags not allowed for typed instructions. */
	SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
	/* Both size flags cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
	/* SSE2 and immediate is not possible. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
	SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
		&& (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
		&& (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));

	size &= 0xf;
	inst_size = size;

	if (!compiler->mode32 && !(flags & EX86_NO_REXW))
		rex |= REX_W;
	else if (flags & EX86_REX)
		rex |= REX;

	/* Mandatory prefixes. */
	if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
		inst_size++;
	if (flags & EX86_PREF_66)
		inst_size++;

	/* Calculate size of b. */
	inst_size += 1; /* mod r/m byte. */
	if (b & SLJIT_MEM) {
		if (!(b & OFFS_REG_MASK)) {
			if (NOT_HALFWORD(immb)) {
				/* Displacement does not fit into 32 bits: materialize it
				   in TMP_REG2 and address through the register instead. */
				PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
				immb = 0;
				if (b & REG_MASK)
					b |= TO_OFFS_REG(TMP_REG2);
				else
					b |= TMP_REG2;
			}
			else if (reg_lmap[b & REG_MASK] == 4)
				/* Base r/m 4 (rsp/r12) always requires a SIB byte. */
				b |= TO_OFFS_REG(SLJIT_SP);
		}

		if ((b & REG_MASK) == SLJIT_UNUSED)
			inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
		else {
			if (reg_map[b & REG_MASK] >= 8)
				rex |= REX_B;

			if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
				/* Immediate operand. */
				if (immb <= 127 && immb >= -128)
					inst_size += sizeof(sljit_s8);
				else
					inst_size += sizeof(sljit_s32);
			}
			else if (reg_lmap[b & REG_MASK] == 5)
				/* Base r/m 5 (rbp/r13) cannot be encoded without a
				   displacement; an explicit zero byte is emitted. */
				inst_size += sizeof(sljit_s8);

			if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) {
				inst_size += 1; /* SIB byte. */
				if (reg_map[OFFS_REG(b)] >= 8)
					rex |= REX_X;
			}
		}
	}
	else if (!(flags & EX86_SSE2_OP2)) {
		if (reg_map[b] >= 8)
			rex |= REX_B;
	}
	else if (freg_map[b] >= 8)
		rex |= REX_B;

	/* Calculate size of a (immediate width, or REX.R for a register). */
	if (a & SLJIT_IMM) {
		if (flags & EX86_BIN_INS) {
			if (imma <= 127 && imma >= -128) {
				inst_size += 1;
				flags |= EX86_BYTE_ARG;
			} else
				inst_size += 4;
		}
		else if (flags & EX86_SHIFT_INS) {
			/* Shift counts are masked to the operand width. */
			imma &= compiler->mode32 ? 0x1f : 0x3f;
			if (imma != 1) {
				/* Shift-by-1 has its own opcode, no immediate byte. */
				inst_size ++;
				flags |= EX86_BYTE_ARG;
			}
		} else if (flags & EX86_BYTE_ARG)
			inst_size++;
		else if (flags & EX86_HALF_ARG)
			inst_size += sizeof(short);
		else
			inst_size += sizeof(sljit_s32);
	}
	else {
		SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
		/* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
		if (!(flags & EX86_SSE2_OP1)) {
			if (reg_map[a] >= 8)
				rex |= REX_R;
		}
		else if (freg_map[a] >= 8)
			rex |= REX_R;
	}

	if (rex)
		inst_size++;

	inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
	PTR_FAIL_IF(!inst);

	/* Encoding the byte. */
	INC_SIZE(inst_size);
	if (flags & EX86_PREF_F2)
		*inst++ = 0xf2;
	if (flags & EX86_PREF_F3)
		*inst++ = 0xf3;
	if (flags & EX86_PREF_66)
		*inst++ = 0x66;
	if (rex)
		*inst++ = rex;
	/* Skip the opcode bytes (filled in by the caller, except for the
	   immediate-form group opcodes stored below). */
	buf_ptr = inst + size;

	/* Encode mod/rm byte. */
	if (!(flags & EX86_SHIFT_INS)) {
		if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
			*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;

		if (a & SLJIT_IMM)
			*buf_ptr = 0;
		else if (!(flags & EX86_SSE2_OP1))
			*buf_ptr = reg_lmap[a] << 3;
		else
			*buf_ptr = freg_lmap[a] << 3;
	}
	else {
		if (a & SLJIT_IMM) {
			if (imma == 1)
				*inst = GROUP_SHIFT_1;
			else
				*inst = GROUP_SHIFT_N;
		} else
			*inst = GROUP_SHIFT_CL;
		*buf_ptr = 0;
	}

	if (!(b & SLJIT_MEM))
		*buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : freg_lmap[b]);
	else if ((b & REG_MASK) != SLJIT_UNUSED) {
		if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
			if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
				/* mod 01 (disp8) or mod 10 (disp32). */
				if (immb <= 127 && immb >= -128)
					*buf_ptr |= 0x40;
				else
					*buf_ptr |= 0x80;
			}

			if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
				*buf_ptr++ |= reg_lmap[b & REG_MASK];
			else {
				/* r/m 4: address via SIB byte. */
				*buf_ptr++ |= 0x04;
				*buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3);
			}

			if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr++ = immb; /* 8 bit displacement. */
				else {
					sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
					buf_ptr += sizeof(sljit_s32);
				}
			}
		}
		else {
			/* Scaled index form: immb (0-3) is the scale shift in the SIB byte. */
			if (reg_lmap[b & REG_MASK] == 5)
				*buf_ptr |= 0x40;
			*buf_ptr++ |= 0x04;
			*buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6);
			if (reg_lmap[b & REG_MASK] == 5)
				*buf_ptr++ = 0;
		}
	}
	else {
		/* Absolute address: mod 00 with SIB = 0x25 (no base, no index). */
		*buf_ptr++ |= 0x04;
		*buf_ptr++ = 0x25;
		sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
		buf_ptr += sizeof(sljit_s32);
	}

	/* Store the immediate operand after the addressing bytes. */
	if (a & SLJIT_IMM) {
		if (flags & EX86_BYTE_ARG)
			*buf_ptr = imma;
		else if (flags & EX86_HALF_ARG)
			sljit_unaligned_store_s16(buf_ptr, imma);
		else if (!(flags & EX86_SHIFT_INS))
			sljit_unaligned_store_s32(buf_ptr, imma);
	}

	return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
}
  483. /* --------------------------------------------------------------------- */
  484. /* Call / return instructions */
  485. /* --------------------------------------------------------------------- */
  486. #ifndef _WIN64
/* System V AMD64 argument shuffle before a call. Per the assert below,
   R1 and R3 already live in argument registers (rsi, rcx); only the
   first word argument (held in R0) must be moved into R2's hardware
   register, and a third word argument in R2 is parked in TMP_REG1 first.
   src_ptr (may be NULL) is the indirect call target; it is redirected
   when the shuffle would clobber it. */
static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
{
	sljit_s32 src = src_ptr ? (*src_ptr) : 0;
	sljit_s32 word_arg_count = 0;

	SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);

	compiler->mode32 = 0;

	/* Remove return value. */
	arg_types >>= SLJIT_DEF_SHIFT;

	/* Count the non-float (word) arguments. */
	while (arg_types) {
		if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
			word_arg_count++;
		arg_types >>= SLJIT_DEF_SHIFT;
	}

	if (word_arg_count == 0)
		return SLJIT_SUCCESS;

	if (src & SLJIT_MEM) {
		/* Load a memory call target into TMP_REG2 before rsp moves. */
		ADJUST_LOCAL_OFFSET(src, srcw);
		EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
		*src_ptr = TMP_REG2;
	}
	else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2)
		/* SLJIT_R2 also acts as the small constant 3 here: with three or
		   more word arguments R2 is saved into TMP_REG1 below, so the
		   call target must be read from there. */
		*src_ptr = TMP_REG1;

	/* Third word argument: R2 -> TMP_REG1 (rdx), then first: R0 -> R2. */
	if (word_arg_count >= 3)
		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
	return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
}
  513. #else
/* Windows x64 argument shuffle before a call. Word arguments go to
   rcx, rdx, r8, r9 (R3, R1, R2, TMP_REG1 per the assert); float
   arguments go to xmm0-xmm3 by position. A first pass records the
   types and decides whether any move is needed; a second pass emits
   the moves starting with the last argument. src_ptr (may be NULL) is
   the indirect call target and is redirected when it would be
   clobbered. */
static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
{
	sljit_s32 src = src_ptr ? (*src_ptr) : 0;
	sljit_s32 arg_count = 0;
	sljit_s32 word_arg_count = 0;
	sljit_s32 float_arg_count = 0;
	sljit_s32 types = 0;
	/* NOTE(review): "data_trandfer" is a typo for "data_transfer"
	   (local variable only, no behavioral effect). */
	sljit_s32 data_trandfer = 0;
	/* Destination registers for word arguments 1-4. */
	static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };

	SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);

	compiler->mode32 = 0;

	/* Remove return value. */
	arg_types >>= SLJIT_DEF_SHIFT;

	/* First pass: accumulate the types (in reverse order) into "types"
	   and detect whether any argument is not already in place. */
	while (arg_types) {
		types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);

		switch (arg_types & SLJIT_DEF_MASK) {
		case SLJIT_ARG_TYPE_F32:
		case SLJIT_ARG_TYPE_F64:
			arg_count++;
			float_arg_count++;

			if (arg_count != float_arg_count)
				data_trandfer = 1;
			break;
		default:
			arg_count++;
			word_arg_count++;

			if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) {
				data_trandfer = 1;

				/* The shuffle would clobber the call target register;
				   keep the target in TMP_REG2 instead. */
				if (src == word_arg_regs[arg_count]) {
					EMIT_MOV(compiler, TMP_REG2, 0, src, 0);
					*src_ptr = TMP_REG2;
				}
			}
			break;
		}

		arg_types >>= SLJIT_DEF_SHIFT;
	}

	if (!data_trandfer)
		return SLJIT_SUCCESS;

	if (src & SLJIT_MEM) {
		/* Load a memory call target into TMP_REG2 first. */
		ADJUST_LOCAL_OFFSET(src, srcw);
		EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
		*src_ptr = TMP_REG2;
	}

	/* Second pass (last argument first): move every argument that is
	   not already in its destination register. */
	while (types) {
		switch (types & SLJIT_DEF_MASK) {
		case SLJIT_ARG_TYPE_F32:
			if (arg_count != float_arg_count)
				FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
			arg_count--;
			float_arg_count--;
			break;
		case SLJIT_ARG_TYPE_F64:
			if (arg_count != float_arg_count)
				FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
			arg_count--;
			float_arg_count--;
			break;
		default:
			if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count])
				EMIT_MOV(compiler, word_arg_regs[arg_count], 0, word_arg_count, 0);
			arg_count--;
			word_arg_count--;
			break;
		}

		types >>= SLJIT_DEF_SHIFT;
	}

	return SLJIT_SUCCESS;
}
  582. #endif
/* Emits a call to a label/constant target: shuffles the arguments into
   their ABI registers, then emits the jump itself. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

	PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
	|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	/* The nested emit below must not re-check/re-log this call. */
	compiler->skip_checks = 1;
#endif

	return sljit_emit_jump(compiler, type);
}
/* Emits an indirect call: shuffles the arguments into their ABI
   registers (which may redirect src when its register gets clobbered),
   then emits the indirect jump. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

	FAIL_IF(call_with_args(compiler, arg_types, &src, srcw));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
	|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	/* The nested emit below must not re-check/re-log this call. */
	compiler->skip_checks = 1;
#endif

	return sljit_emit_ijump(compiler, type, src, srcw);
}
  608. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  609. {
  610. sljit_u8 *inst;
  611. CHECK_ERROR();
  612. CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
  613. ADJUST_LOCAL_OFFSET(dst, dstw);
  614. /* For UNUSED dst. Uncommon, but possible. */
  615. if (dst == SLJIT_UNUSED)
  616. dst = TMP_REG1;
  617. if (FAST_IS_REG(dst)) {
  618. if (reg_map[dst] < 8) {
  619. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
  620. FAIL_IF(!inst);
  621. INC_SIZE(1);
  622. POP_REG(reg_lmap[dst]);
  623. return SLJIT_SUCCESS;
  624. }
  625. inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
  626. FAIL_IF(!inst);
  627. INC_SIZE(2);
  628. *inst++ = REX_B;
  629. POP_REG(reg_lmap[dst]);
  630. return SLJIT_SUCCESS;
  631. }
  632. /* REX_W is not necessary (src is not immediate). */
  633. compiler->mode32 = 1;
  634. inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
  635. FAIL_IF(!inst);
  636. *inst++ = POP_rm;
  637. return SLJIT_SUCCESS;
  638. }
  639. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
  640. {
  641. sljit_u8 *inst;
  642. CHECK_ERROR();
  643. CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
  644. ADJUST_LOCAL_OFFSET(src, srcw);
  645. if (FAST_IS_REG(src)) {
  646. if (reg_map[src] < 8) {
  647. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1);
  648. FAIL_IF(!inst);
  649. INC_SIZE(1 + 1);
  650. PUSH_REG(reg_lmap[src]);
  651. }
  652. else {
  653. inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 1);
  654. FAIL_IF(!inst);
  655. INC_SIZE(2 + 1);
  656. *inst++ = REX_B;
  657. PUSH_REG(reg_lmap[src]);
  658. }
  659. }
  660. else {
  661. /* REX_W is not necessary (src is not immediate). */
  662. compiler->mode32 = 1;
  663. inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
  664. FAIL_IF(!inst);
  665. *inst++ = GROUP_FF;
  666. *inst |= PUSH_rm;
  667. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
  668. FAIL_IF(!inst);
  669. INC_SIZE(1);
  670. }
  671. RET();
  672. return SLJIT_SUCCESS;
  673. }
  674. /* --------------------------------------------------------------------- */
  675. /* Extend input */
  676. /* --------------------------------------------------------------------- */
/* Emits a 32 -> 64 bit move: sign extension (movsxd) when "sign" is
   non-zero, zero extension (plain 32 bit mov) otherwise. Handles
   immediate, register and memory operands on both sides. */
static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;
	sljit_s32 dst_r;

	compiler->mode32 = 0;

	if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
		return SLJIT_SUCCESS; /* Empty instruction. */

	if (src & SLJIT_IMM) {
		if (FAST_IS_REG(dst)) {
			if (sign || ((sljit_uw)srcw <= 0x7fffffff)) {
				/* A sign extended 32 bit immediate mov produces the
				   desired value in the full 64 bit register. */
				inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
				FAIL_IF(!inst);
				*inst = MOV_rm_i32;
				return SLJIT_SUCCESS;
			}
			/* Zero extending a value with bit 31 set requires the full
			   64 bit immediate form. */
			return emit_load_imm64(compiler, dst, srcw);
		}
		/* Memory destination: 32 bit store. */
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_i32;
		compiler->mode32 = 0;
		return SLJIT_SUCCESS;
	}

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	if ((dst & SLJIT_MEM) && FAST_IS_REG(src))
		/* Register to memory: store the source directly below. */
		dst_r = src;
	else {
		if (sign) {
			/* movsxd dst_r, src. */
			inst = emit_x86_instruction(compiler, 1, dst_r, 0, src, srcw);
			FAIL_IF(!inst);
			*inst++ = MOVSXD_r_rm;
		} else {
			/* A 32 bit mov zero extends into the upper half implicitly. */
			compiler->mode32 = 1;
			FAIL_IF(emit_mov(compiler, dst_r, 0, src, srcw));
			compiler->mode32 = 0;
		}
	}

	if (dst & SLJIT_MEM) {
		/* Store the (extended) value as 32 bits. */
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_r;
		compiler->mode32 = 0;
	}

	return SLJIT_SUCCESS;
}