sljitNativeX86_common.c
/*
 * Stack-less Just-In-Time compiler
 *
 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	return "x86" SLJIT_CPUINFO " ABI:fastcall";
#else
	return "x86" SLJIT_CPUINFO;
#endif
}

/*
   32b register indexes:
   0 - EAX
   1 - ECX
   2 - EDX
   3 - EBX
   4 - ESP
   5 - EBP
   6 - ESI
   7 - EDI
*/
/*
   64b register indexes:
   0 - RAX
   1 - RCX
   2 - RDX
   3 - RBX
   4 - RSP
   5 - RBP
   6 - RSI
   7 - RDI
   8 - R8   - From now on REX prefix is required
   9 - R9
   10 - R10
   11 - R11
   12 - R12
   13 - R13
   14 - R14
   15 - R15
*/
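
/* Encoding note: only the low three bits of a register index fit into the ModRM/SIB
   fields; for indexes 8-15 the missing high bit is supplied by the REX.B/R/X prefix
   bits (reg_lmap below stores reg_map masked with 0x7 for exactly this purpose). */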
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)

/* Last register + 1. */
#define TMP_REG1	(SLJIT_NUMBER_OF_REGISTERS + 2)

static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
	0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 7, 6, 3, 4, 5
};

#define CHECK_EXTRA_REGS(p, w, do) \
	if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
		if (p <= compiler->scratches) \
			w = compiler->saveds_offset - ((p) - SLJIT_R2) * (sljit_sw)sizeof(sljit_sw); \
		else \
			w = compiler->locals_offset + ((p) - SLJIT_S2) * (sljit_sw)sizeof(sljit_sw); \
		p = SLJIT_MEM1(SLJIT_SP); \
		do; \
	}
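
/* On x86-32 the SLJIT_R3 .. SLJIT_S3 range has no dedicated hardware register;
   CHECK_EXTRA_REGS rewrites such operands into SLJIT_MEM1(SLJIT_SP) accesses at
   their stack slot offset, which is why SLJIT_HAS_VIRTUAL_REGISTERS reports 1
   on x86-32 (see sljit_has_cpu_feature below). */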

#else /* SLJIT_CONFIG_X86_32 */

/* Last register + 1. */
#define TMP_REG1	(SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2	(SLJIT_NUMBER_OF_REGISTERS + 3)

/* Note: r12 & 0x7 == 0b100, which is decoded as "SIB byte present".
   Note: avoid using r12 and r13 for memory addressing;
   therefore r12 is better used as a higher saved register. */
#ifndef _WIN64
/* Args: rdi(=7), rsi(=6), rdx(=2), rcx(=1), r8, r9. Scratches: rax(=0), r10, r11 */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
	0, 0, 6, 7, 1, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
};
/* low-map. reg_map & 0x7. */
static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
	0, 0, 6, 7, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
};
#else
/* Args: rcx(=1), rdx(=2), r8, r9. Scratches: rax(=0), r10, r11 */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
	0, 0, 2, 8, 1, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 9, 10
};
/* low-map. reg_map & 0x7. */
static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
	0, 0, 2, 0, 1, 3, 4, 5, 5, 6, 7, 7, 6, 3, 4, 1, 2
};
#endif

/* Args: xmm0-xmm3 */
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
	4, 0, 1, 2, 3, 5, 6
};
/* low-map. freg_map & 0x7. */
static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
	4, 0, 1, 2, 3, 5, 6
};

#define REX_W		0x48
#define REX_R		0x44
#define REX_X		0x42
#define REX_B		0x41
#define REX		0x40
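
/* A REX prefix byte has the form 0100WRXB; the values above are OR-ed together.
   W selects a 64-bit operand size, while R, X and B supply the fourth bit of the
   ModRM.reg, SIB.index and ModRM.rm/SIB.base fields respectively. */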
  118. #ifndef _WIN64
  119. #define HALFWORD_MAX 0x7fffffffl
  120. #define HALFWORD_MIN -0x80000000l
  121. #else
  122. #define HALFWORD_MAX 0x7fffffffll
  123. #define HALFWORD_MIN -0x80000000ll
  124. #endif
  125. #define IS_HALFWORD(x) ((x) <= HALFWORD_MAX && (x) >= HALFWORD_MIN)
  126. #define NOT_HALFWORD(x) ((x) > HALFWORD_MAX || (x) < HALFWORD_MIN)
  127. #define CHECK_EXTRA_REGS(p, w, do)
  128. #endif /* SLJIT_CONFIG_X86_32 */
  129. #define TMP_FREG (0)
  130. /* Size flags for emit_x86_instruction: */
  131. #define EX86_BIN_INS 0x0010
  132. #define EX86_SHIFT_INS 0x0020
  133. #define EX86_REX 0x0040
  134. #define EX86_NO_REXW 0x0080
  135. #define EX86_BYTE_ARG 0x0100
  136. #define EX86_HALF_ARG 0x0200
  137. #define EX86_PREF_66 0x0400
  138. #define EX86_PREF_F2 0x0800
  139. #define EX86_PREF_F3 0x1000
  140. #define EX86_SSE2_OP1 0x2000
  141. #define EX86_SSE2_OP2 0x4000
  142. #define EX86_SSE2 (EX86_SSE2_OP1 | EX86_SSE2_OP2)
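
/* These flags are OR-ed into the size argument of emit_x86_instruction()
   (e.g. 1 | EX86_BIN_INS): the low bits carry the base instruction size in
   bytes, while the flags above select prefixes, REX handling and operand forms. */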

/* --------------------------------------------------------------------- */
/*  Instruction forms                                                     */
/* --------------------------------------------------------------------- */

#define ADD		(/* BINARY */ 0 << 3)
#define ADD_EAX_i32	0x05
#define ADD_r_rm	0x03
#define ADD_rm_r	0x01
#define ADDSD_x_xm	0x58
#define ADC		(/* BINARY */ 2 << 3)
#define ADC_EAX_i32	0x15
#define ADC_r_rm	0x13
#define ADC_rm_r	0x11
#define AND		(/* BINARY */ 4 << 3)
#define AND_EAX_i32	0x25
#define AND_r_rm	0x23
#define AND_rm_r	0x21
#define ANDPD_x_xm	0x54
#define BSR_r_rm	(/* GROUP_0F */ 0xbd)
#define CALL_i32	0xe8
#define CALL_rm		(/* GROUP_FF */ 2 << 3)
#define CDQ		0x99
#define CMOVE_r_rm	(/* GROUP_0F */ 0x44)
#define CMP		(/* BINARY */ 7 << 3)
#define CMP_EAX_i32	0x3d
#define CMP_r_rm	0x3b
#define CMP_rm_r	0x39
#define CVTPD2PS_x_xm	0x5a
#define CVTSI2SD_x_rm	0x2a
#define CVTTSD2SI_r_xm	0x2c
#define DIV		(/* GROUP_F7 */ 6 << 3)
#define DIVSD_x_xm	0x5e
#define FSTPS		0xd9
#define FSTPD		0xdd
#define INT3		0xcc
#define IDIV		(/* GROUP_F7 */ 7 << 3)
#define IMUL		(/* GROUP_F7 */ 5 << 3)
#define IMUL_r_rm	(/* GROUP_0F */ 0xaf)
#define IMUL_r_rm_i8	0x6b
#define IMUL_r_rm_i32	0x69
#define JE_i8		0x74
#define JNE_i8		0x75
#define JMP_i8		0xeb
#define JMP_i32		0xe9
#define JMP_rm		(/* GROUP_FF */ 4 << 3)
#define LEA_r_m		0x8d
#define MOV_r_rm	0x8b
#define MOV_r_i32	0xb8
#define MOV_rm_r	0x89
#define MOV_rm_i32	0xc7
#define MOV_rm8_i8	0xc6
#define MOV_rm8_r8	0x88
#define MOVSD_x_xm	0x10
#define MOVSD_xm_x	0x11
#define MOVSXD_r_rm	0x63
#define MOVSX_r_rm8	(/* GROUP_0F */ 0xbe)
#define MOVSX_r_rm16	(/* GROUP_0F */ 0xbf)
#define MOVZX_r_rm8	(/* GROUP_0F */ 0xb6)
#define MOVZX_r_rm16	(/* GROUP_0F */ 0xb7)
#define MUL		(/* GROUP_F7 */ 4 << 3)
#define MULSD_x_xm	0x59
#define NEG_rm		(/* GROUP_F7 */ 3 << 3)
#define NOP		0x90
#define NOT_rm		(/* GROUP_F7 */ 2 << 3)
#define OR		(/* BINARY */ 1 << 3)
#define OR_r_rm		0x0b
#define OR_EAX_i32	0x0d
#define OR_rm_r		0x09
#define OR_rm8_r8	0x08
#define POP_r		0x58
#define POP_rm		0x8f
#define POPF		0x9d
#define PREFETCH	0x18
#define PUSH_i32	0x68
#define PUSH_r		0x50
#define PUSH_rm		(/* GROUP_FF */ 6 << 3)
#define PUSHF		0x9c
#define RET_near	0xc3
#define RET_i16		0xc2
#define SBB		(/* BINARY */ 3 << 3)
#define SBB_EAX_i32	0x1d
#define SBB_r_rm	0x1b
#define SBB_rm_r	0x19
#define SAR		(/* SHIFT */ 7 << 3)
#define SHL		(/* SHIFT */ 4 << 3)
#define SHR		(/* SHIFT */ 5 << 3)
#define SUB		(/* BINARY */ 5 << 3)
#define SUB_EAX_i32	0x2d
#define SUB_r_rm	0x2b
#define SUB_rm_r	0x29
#define SUBSD_x_xm	0x5c
#define TEST_EAX_i32	0xa9
#define TEST_rm_r	0x85
#define UCOMISD_x_xm	0x2e
#define UNPCKLPD_x_xm	0x14
#define XCHG_EAX_r	0x90
#define XCHG_r_rm	0x87
#define XOR		(/* BINARY */ 6 << 3)
#define XOR_EAX_i32	0x35
#define XOR_r_rm	0x33
#define XOR_rm_r	0x31
#define XORPD_x_xm	0x57

#define GROUP_0F	0x0f
#define GROUP_F7	0xf7
#define GROUP_FF	0xff
#define GROUP_BINARY_81	0x81
#define GROUP_BINARY_83	0x83
#define GROUP_SHIFT_1	0xd1
#define GROUP_SHIFT_N	0xc1
#define GROUP_SHIFT_CL	0xd3

#define MOD_REG		0xc0
#define MOD_DISP8	0x40

#define INC_SIZE(s)	(*inst++ = (s), compiler->size += (s))

#define PUSH_REG(r)	(*inst++ = (PUSH_r + (r)))
#define POP_REG(r)	(*inst++ = (POP_r + (r)))
#define RET()		(*inst++ = (RET_near))
#define RET_I16(n)	(*inst++ = (RET_i16), *inst++ = n, *inst++ = 0)
/* r32, r/m32 */
#define MOV_RM(mod, reg, rm)	(*inst++ = (MOV_r_rm), *inst++ = (mod) << 6 | (reg) << 3 | (rm))
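
/* ModRM byte layout produced by MOV_RM and by the MOD_REG/MOD_DISP8 values above:
   bits 7-6 = mod, bits 5-3 = reg (or opcode extension), bits 2-0 = rm. */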

/* Multithreading does not affect these static variables, since they store
   built-in CPU features. Therefore they can be overwritten by different threads
   if they detect the CPU features at the same time. */
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
static sljit_s32 cpu_has_sse2 = -1;
#endif
static sljit_s32 cpu_has_cmov = -1;

#ifdef _WIN32_WCE
#include <cmnintrin.h>
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#include <intrin.h>
#endif

/******************************************************/
/* Unaligned-store functions                          */
/******************************************************/

static SLJIT_INLINE void sljit_unaligned_store_s16(void *addr, sljit_s16 value)
{
	SLJIT_MEMCPY(addr, &value, sizeof(value));
}

static SLJIT_INLINE void sljit_unaligned_store_s32(void *addr, sljit_s32 value)
{
	SLJIT_MEMCPY(addr, &value, sizeof(value));
}

static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value)
{
	SLJIT_MEMCPY(addr, &value, sizeof(value));
}

/******************************************************/
/* Utility functions                                  */
/******************************************************/

static void get_cpu_features(void)
{
	sljit_u32 features;

#if defined(_MSC_VER) && _MSC_VER >= 1400

	int CPUInfo[4];
	__cpuid(CPUInfo, 1);
	features = (sljit_u32)CPUInfo[3];

#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C)

	/* AT&T syntax. */
	__asm__ (
		"movl $0x1, %%eax\n"
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		/* On x86-32, there is no red zone, so this
		   should work (no need for a local variable). */
		"push %%ebx\n"
#endif
		"cpuid\n"
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		"pop %%ebx\n"
#endif
		"movl %%edx, %0\n"
		: "=g" (features)
		:
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		: "%eax", "%ecx", "%edx"
#else
		: "%rax", "%rbx", "%rcx", "%rdx"
#endif
	);

#else /* _MSC_VER && _MSC_VER >= 1400 */

	/* Intel syntax. */
	__asm {
		mov eax, 1
		cpuid
		mov features, edx
	}

#endif /* _MSC_VER && _MSC_VER >= 1400 */

#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
	cpu_has_sse2 = (features >> 26) & 0x1;
#endif
	cpu_has_cmov = (features >> 15) & 0x1;
}
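
/* CPUID leaf 1 returns the feature bits in EDX: bit 26 signals SSE2 support and
   bit 15 signals CMOV support, which is what the two tests above extract. */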

static sljit_u8 get_jump_code(sljit_s32 type)
{
	switch (type) {
	case SLJIT_EQUAL:
	case SLJIT_EQUAL_F64:
		return 0x84 /* je */;
	case SLJIT_NOT_EQUAL:
	case SLJIT_NOT_EQUAL_F64:
		return 0x85 /* jne */;
	case SLJIT_LESS:
	case SLJIT_LESS_F64:
		return 0x82 /* jc */;
	case SLJIT_GREATER_EQUAL:
	case SLJIT_GREATER_EQUAL_F64:
		return 0x83 /* jae */;
	case SLJIT_GREATER:
	case SLJIT_GREATER_F64:
		return 0x87 /* jnbe */;
	case SLJIT_LESS_EQUAL:
	case SLJIT_LESS_EQUAL_F64:
		return 0x86 /* jbe */;
	case SLJIT_SIG_LESS:
		return 0x8c /* jl */;
	case SLJIT_SIG_GREATER_EQUAL:
		return 0x8d /* jnl */;
	case SLJIT_SIG_GREATER:
		return 0x8f /* jnle */;
	case SLJIT_SIG_LESS_EQUAL:
		return 0x8e /* jle */;
	case SLJIT_OVERFLOW:
		return 0x80 /* jo */;
	case SLJIT_NOT_OVERFLOW:
		return 0x81 /* jno */;
	case SLJIT_UNORDERED_F64:
		return 0x8a /* jp */;
	case SLJIT_ORDERED_F64:
		return 0x8b /* jpo */;
	}
	return 0;
}
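
/* The values returned above are the second byte of the two-byte 0F 8x Jcc
   encodings; subtracting 0x10 gives the matching one-byte short (rel8) form,
   which generate_near_jump_code below relies on. */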

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_sw executable_offset);
#else
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr);
static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, sljit_u8 *code_ptr, sljit_uw max_label);
#endif

static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_u8 *code, sljit_sw executable_offset)
{
	sljit_s32 type = jump->flags >> TYPE_SHIFT;
	sljit_s32 short_jump;
	sljit_uw label_addr;

	if (jump->flags & JUMP_LABEL)
		label_addr = (sljit_uw)(code + jump->u.label->size);
	else
		label_addr = jump->u.target - executable_offset;

	short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	if ((sljit_sw)(label_addr - (jump->addr + 1)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 1)) < HALFWORD_MIN)
		return generate_far_jump_code(jump, code_ptr);
#endif

	if (type == SLJIT_JUMP) {
		if (short_jump)
			*code_ptr++ = JMP_i8;
		else
			*code_ptr++ = JMP_i32;
		jump->addr++;
	}
	else if (type >= SLJIT_FAST_CALL) {
		short_jump = 0;
		*code_ptr++ = CALL_i32;
		jump->addr++;
	}
	else if (short_jump) {
		*code_ptr++ = get_jump_code(type) - 0x10;
		jump->addr++;
	}
	else {
		*code_ptr++ = GROUP_0F;
		*code_ptr++ = get_jump_code(type);
		jump->addr += 2;
	}

	if (short_jump) {
		jump->flags |= PATCH_MB;
		code_ptr += sizeof(sljit_s8);
	} else {
		jump->flags |= PATCH_MW;
		code_ptr += sizeof(sljit_s32);
	}

	return code_ptr;
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
	struct sljit_memory_fragment *buf;
	sljit_u8 *code;
	sljit_u8 *code_ptr;
	sljit_u8 *buf_ptr;
	sljit_u8 *buf_end;
	sljit_u8 len;
	sljit_sw executable_offset;
	sljit_sw jump_addr;

	struct sljit_label *label;
	struct sljit_jump *jump;
	struct sljit_const *const_;
	struct sljit_put_label *put_label;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_generate_code(compiler));
	reverse_buf(compiler);

	/* Second code generation pass. */
	code = (sljit_u8*)SLJIT_MALLOC_EXEC(compiler->size, compiler->exec_allocator_data);
	PTR_FAIL_WITH_EXEC_IF(code);
	buf = compiler->buf;

	code_ptr = code;
	label = compiler->labels;
	jump = compiler->jumps;
	const_ = compiler->consts;
	put_label = compiler->put_labels;
	executable_offset = SLJIT_EXEC_OFFSET(code);

	do {
		buf_ptr = buf->memory;
		buf_end = buf_ptr + buf->used_size;
		do {
			len = *buf_ptr++;
			if (len > 0) {
				/* The code is already generated. */
				SLJIT_MEMCPY(code_ptr, buf_ptr, len);
				code_ptr += len;
				buf_ptr += len;
			}
			else {
				switch (*buf_ptr) {
				case 0:
					label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
					label->size = code_ptr - code;
					label = label->next;
					break;
				case 1:
					jump->addr = (sljit_uw)code_ptr;
					if (!(jump->flags & SLJIT_REWRITABLE_JUMP))
						code_ptr = generate_near_jump_code(jump, code_ptr, code, executable_offset);
					else {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
						code_ptr = generate_far_jump_code(jump, code_ptr, executable_offset);
#else
						code_ptr = generate_far_jump_code(jump, code_ptr);
#endif
					}
					jump = jump->next;
					break;
				case 2:
					const_->addr = ((sljit_uw)code_ptr) - sizeof(sljit_sw);
					const_ = const_->next;
					break;
				default:
					SLJIT_ASSERT(*buf_ptr == 3);
					SLJIT_ASSERT(put_label->label);
					put_label->addr = (sljit_uw)code_ptr;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
					code_ptr = generate_put_label_code(put_label, code_ptr, (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size);
#endif
					put_label = put_label->next;
					break;
				}
				buf_ptr++;
			}
		} while (buf_ptr < buf_end);
		SLJIT_ASSERT(buf_ptr == buf_end);
		buf = buf->next;
	} while (buf);

	SLJIT_ASSERT(!label);
	SLJIT_ASSERT(!jump);
	SLJIT_ASSERT(!const_);
	SLJIT_ASSERT(!put_label);
	SLJIT_ASSERT(code_ptr <= code + compiler->size);

	jump = compiler->jumps;
	while (jump) {
		jump_addr = jump->addr + executable_offset;

		if (jump->flags & PATCH_MB) {
			SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) <= 127);
			*(sljit_u8*)jump->addr = (sljit_u8)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8)));
		} else if (jump->flags & PATCH_MW) {
			if (jump->flags & JUMP_LABEL) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
				sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_sw))));
#else
				SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
				sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))));
#endif
			}
			else {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
				sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_sw))));
#else
				SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
				sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.target - (jump_addr + sizeof(sljit_s32))));
#endif
			}
		}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
		else if (jump->flags & PATCH_MD)
			sljit_unaligned_store_sw((void*)jump->addr, jump->u.label->addr);
#endif

		jump = jump->next;
	}

	put_label = compiler->put_labels;
	while (put_label) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		sljit_unaligned_store_sw((void*)(put_label->addr - sizeof(sljit_sw)), (sljit_sw)put_label->label->addr);
#else
		if (put_label->flags & PATCH_MD) {
			SLJIT_ASSERT(put_label->label->addr > HALFWORD_MAX);
			sljit_unaligned_store_sw((void*)(put_label->addr - sizeof(sljit_sw)), (sljit_sw)put_label->label->addr);
		}
		else {
			SLJIT_ASSERT(put_label->label->addr <= HALFWORD_MAX);
			sljit_unaligned_store_s32((void*)(put_label->addr - sizeof(sljit_s32)), (sljit_s32)put_label->label->addr);
		}
#endif

		put_label = put_label->next;
	}

	compiler->error = SLJIT_ERR_COMPILED;
	compiler->executable_offset = executable_offset;
	compiler->executable_size = code_ptr - code;

	code = (sljit_u8*)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);

	SLJIT_UPDATE_WX_FLAGS(code, (sljit_u8*)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset), 1);
	return (void*)code;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
	switch (feature_type) {
	case SLJIT_HAS_FPU:
#ifdef SLJIT_IS_FPU_AVAILABLE
		return SLJIT_IS_FPU_AVAILABLE;
#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
		if (cpu_has_sse2 == -1)
			get_cpu_features();
		return cpu_has_sse2;
#else /* SLJIT_DETECT_SSE2 */
		return 1;
#endif /* SLJIT_DETECT_SSE2 */

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	case SLJIT_HAS_VIRTUAL_REGISTERS:
		return 1;
#endif

	case SLJIT_HAS_CLZ:
	case SLJIT_HAS_CMOV:
		if (cpu_has_cmov == -1)
			get_cpu_features();
		return cpu_has_cmov;

	case SLJIT_HAS_PREFETCH:
		return 1;

	case SLJIT_HAS_SSE2:
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
		if (cpu_has_sse2 == -1)
			get_cpu_features();
		return cpu_has_sse2;
#else
		return 1;
#endif

	default:
		return 0;
	}
}

/* --------------------------------------------------------------------- */
/*  Operators                                                             */
/* --------------------------------------------------------------------- */

#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode))
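
/* BINARY_OPCODE packs the four encodings of a binary ALU operation into one word:
   byte 3 = the EAX, imm32 short form, byte 2 = the r, r/m form, byte 1 = the
   r/m, r form, and byte 0 = the /digit used with the 0x81/0x83 immediate group. */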

static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
	sljit_u32 op_types,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w);

static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
	sljit_u32 op_types,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w);

static sljit_s32 emit_mov(struct sljit_compiler *compiler,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw);

#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
	FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));

static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
	sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src);

static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
	sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw);

static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w);

static SLJIT_INLINE sljit_s32 emit_endbranch(struct sljit_compiler *compiler)
{
#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET)
	/* Emit endbr32/endbr64 when CET is enabled. */
	sljit_u8 *inst;
	inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
	FAIL_IF(!inst);
	INC_SIZE(4);
	*inst++ = 0xf3;
	*inst++ = 0x0f;
	*inst++ = 0x1e;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	*inst = 0xfb;
#else
	*inst = 0xfa;
#endif
#else /* !SLJIT_CONFIG_X86_CET */
	SLJIT_UNUSED_ARG(compiler);
#endif /* SLJIT_CONFIG_X86_CET */
	return SLJIT_SUCCESS;
}

#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)

static SLJIT_INLINE sljit_s32 emit_rdssp(struct sljit_compiler *compiler, sljit_s32 reg)
{
	sljit_u8 *inst;
	sljit_s32 size;

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	size = 5;
#else
	size = 4;
#endif

	inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!inst);
	INC_SIZE(size);
	*inst++ = 0xf3;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	*inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : REX_B);
#endif
	*inst++ = 0x0f;
	*inst++ = 0x1e;
	*inst = (0x3 << 6) | (0x1 << 3) | (reg_map[reg] & 0x7);
	return SLJIT_SUCCESS;
}

static SLJIT_INLINE sljit_s32 emit_incssp(struct sljit_compiler *compiler, sljit_s32 reg)
{
	sljit_u8 *inst;
	sljit_s32 size;

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	size = 5;
#else
	size = 4;
#endif

	inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!inst);
	INC_SIZE(size);
	*inst++ = 0xf3;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	*inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : REX_B);
#endif
	*inst++ = 0x0f;
	*inst++ = 0xae;
	*inst = (0x3 << 6) | (0x5 << 3) | (reg_map[reg] & 0x7);
	return SLJIT_SUCCESS;
}

#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
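
/* The helpers above emit the CET shadow stack instructions RDSSP (F3 0F 1E /1)
   and INCSSP (F3 0F AE /5); adjust_shadow_stack() below uses them to advance the
   shadow stack pointer one entry at a time until its top matches the return
   address found on the normal stack. */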

static SLJIT_INLINE sljit_s32 cpu_has_shadow_stack(void)
{
#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
	return _get_ssp() != 0;
#else /* !SLJIT_CONFIG_X86_CET || !__SHSTK__ */
	return 0;
#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
}

static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compiler,
	sljit_s32 src, sljit_sw srcw, sljit_s32 base, sljit_sw disp)
{
#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
	sljit_u8 *inst, *jz_after_cmp_inst;
	sljit_uw size_jz_after_cmp_inst;

	sljit_uw size_before_rdssp_inst = compiler->size;

	/* Generate "RDSSP TMP_REG1". */
	FAIL_IF(emit_rdssp(compiler, TMP_REG1));

	/* Load return address on shadow stack into TMP_REG1. */
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	SLJIT_ASSERT(reg_map[TMP_REG1] == 5);

	/* Hand code unsupported "mov 0x0(%ebp),%ebp". */
	inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
	FAIL_IF(!inst);
	INC_SIZE(3);
	*inst++ = 0x8b;
	*inst++ = 0x6d;
	*inst = 0;
#else /* !SLJIT_CONFIG_X86_32 */
	EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(TMP_REG1), 0);
#endif /* SLJIT_CONFIG_X86_32 */

	if (src == SLJIT_UNUSED) {
		/* Return address is on stack. */
		src = SLJIT_MEM1(base);
		srcw = disp;
	}

	/* Compare return address against TMP_REG1. */
	FAIL_IF(emit_cmp_binary(compiler, TMP_REG1, 0, src, srcw));

	/* Generate JZ to skip the shadow stack adjustment when the shadow
	   stack matches the normal stack. */
	inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
	FAIL_IF(!inst);
	INC_SIZE(2);
	*inst++ = get_jump_code(SLJIT_EQUAL) - 0x10;
	size_jz_after_cmp_inst = compiler->size;
	jz_after_cmp_inst = inst;

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	/* REX_W is not necessary. */
	compiler->mode32 = 1;
#endif
	/* Load 1 into TMP_REG1. */
	EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);

	/* Generate "INCSSP TMP_REG1". */
	FAIL_IF(emit_incssp(compiler, TMP_REG1));

	/* Jump back to "RDSSP TMP_REG1" to check shadow stack again. */
	inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
	FAIL_IF(!inst);
	INC_SIZE(2);
	*inst++ = JMP_i8;
	*inst = size_before_rdssp_inst - compiler->size;

	*jz_after_cmp_inst = compiler->size - size_jz_after_cmp_inst;
#else /* !SLJIT_CONFIG_X86_CET || !__SHSTK__ */
	SLJIT_UNUSED_ARG(compiler);
	SLJIT_UNUSED_ARG(src);
	SLJIT_UNUSED_ARG(srcw);
	SLJIT_UNUSED_ARG(base);
	SLJIT_UNUSED_ARG(disp);
#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
	return SLJIT_SUCCESS;
}

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
#include "sljitNativeX86_32.c"
#else
#include "sljitNativeX86_64.c"
#endif

static sljit_s32 emit_mov(struct sljit_compiler *compiler,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;

	SLJIT_ASSERT(dst != SLJIT_UNUSED);

	if (FAST_IS_REG(src)) {
		inst = emit_x86_instruction(compiler, 1, src, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_r;
		return SLJIT_SUCCESS;
	}

	if (src & SLJIT_IMM) {
		if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
			return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
#else
			if (!compiler->mode32) {
				if (NOT_HALFWORD(srcw))
					return emit_load_imm64(compiler, dst, srcw);
			}
			else
				return emit_do_imm32(compiler, (reg_map[dst] >= 8) ? REX_B : 0, MOV_r_i32 + reg_lmap[dst], srcw);
#endif
		}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
		if (!compiler->mode32 && NOT_HALFWORD(srcw)) {
			/* Immediate to memory move. Only SLJIT_MOV operation copies
			   an immediate directly into memory so TMP_REG1 can be used. */
			FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw));
			inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw);
			FAIL_IF(!inst);
			*inst = MOV_rm_r;
			return SLJIT_SUCCESS;
		}
#endif
		inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_i32;
		return SLJIT_SUCCESS;
	}

	if (FAST_IS_REG(dst)) {
		inst = emit_x86_instruction(compiler, 1, dst, 0, src, srcw);
		FAIL_IF(!inst);
		*inst = MOV_r_rm;
		return SLJIT_SUCCESS;
	}

	/* Memory to memory move. Only SLJIT_MOV operation copies
	   data from memory to memory so TMP_REG1 can be used. */
	inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src, srcw);
	FAIL_IF(!inst);
	*inst = MOV_r_rm;
	inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw);
	FAIL_IF(!inst);
	*inst = MOV_rm_r;
	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
	sljit_u8 *inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	sljit_s32 size;
#endif

	CHECK_ERROR();
	CHECK(check_sljit_emit_op0(compiler, op));

	switch (GET_OPCODE(op)) {
	case SLJIT_BREAKPOINT:
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);
		INC_SIZE(1);
		*inst = INT3;
		break;
	case SLJIT_NOP:
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);
		INC_SIZE(1);
		*inst = NOP;
		break;
	case SLJIT_LMUL_UW:
	case SLJIT_LMUL_SW:
	case SLJIT_DIVMOD_UW:
	case SLJIT_DIVMOD_SW:
	case SLJIT_DIV_UW:
	case SLJIT_DIV_SW:
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
#ifdef _WIN64
		SLJIT_ASSERT(
			reg_map[SLJIT_R0] == 0
			&& reg_map[SLJIT_R1] == 2
			&& reg_map[TMP_REG1] > 7);
#else
		SLJIT_ASSERT(
			reg_map[SLJIT_R0] == 0
			&& reg_map[SLJIT_R1] < 7
			&& reg_map[TMP_REG1] == 2);
#endif
		compiler->mode32 = op & SLJIT_I32_OP;
#endif
		SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);

		op = GET_OPCODE(op);
		if ((op | 0x2) == SLJIT_DIV_UW) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64)
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0);
			inst = emit_x86_instruction(compiler, 1, SLJIT_R1, 0, SLJIT_R1, 0);
#else
			inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG1, 0);
#endif
			FAIL_IF(!inst);
			*inst = XOR_r_rm;
		}

		if ((op | 0x2) == SLJIT_DIV_SW) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64)
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0);
#endif

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
			FAIL_IF(!inst);
			INC_SIZE(1);
			*inst = CDQ;
#else
			if (compiler->mode32) {
				inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
				FAIL_IF(!inst);
				INC_SIZE(1);
				*inst = CDQ;
			} else {
				inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
				FAIL_IF(!inst);
				INC_SIZE(2);
				*inst++ = REX_W;
				*inst = CDQ;
			}
#endif
		}

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
		FAIL_IF(!inst);
		INC_SIZE(2);
		*inst++ = GROUP_F7;
		*inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
#else
#ifdef _WIN64
		size = (!compiler->mode32 || op >= SLJIT_DIVMOD_UW) ? 3 : 2;
#else
		size = (!compiler->mode32) ? 3 : 2;
#endif
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
#ifdef _WIN64
		if (!compiler->mode32)
			*inst++ = REX_W | ((op >= SLJIT_DIVMOD_UW) ? REX_B : 0);
		else if (op >= SLJIT_DIVMOD_UW)
			*inst++ = REX_B;
		*inst++ = GROUP_F7;
		*inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
#else
		if (!compiler->mode32)
			*inst++ = REX_W;
		*inst++ = GROUP_F7;
		*inst = MOD_REG | reg_map[SLJIT_R1];
#endif
#endif
		switch (op) {
		case SLJIT_LMUL_UW:
			*inst |= MUL;
			break;
		case SLJIT_LMUL_SW:
			*inst |= IMUL;
			break;
		case SLJIT_DIVMOD_UW:
		case SLJIT_DIV_UW:
			*inst |= DIV;
			break;
		case SLJIT_DIVMOD_SW:
		case SLJIT_DIV_SW:
			*inst |= IDIV;
			break;
		}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !defined(_WIN64)
		if (op <= SLJIT_DIVMOD_SW)
			EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
#else
		if (op >= SLJIT_DIV_UW)
			EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
#endif
		break;
	case SLJIT_ENDBR:
		return emit_endbranch(compiler);
	case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
		return skip_frames_before_return(compiler);
	}

	return SLJIT_SUCCESS;
}

#define ENCODE_PREFIX(prefix) \
	do { \
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); \
		FAIL_IF(!inst); \
		INC_SIZE(1); \
		*inst = (prefix); \
	} while (0)

static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;
	sljit_s32 dst_r;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	sljit_s32 work_r;
#endif

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	compiler->mode32 = 0;
#endif

	if (src & SLJIT_IMM) {
		if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
			return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
#else
			inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
			FAIL_IF(!inst);
			*inst = MOV_rm_i32;
			return SLJIT_SUCCESS;
#endif
		}
		inst = emit_x86_instruction(compiler, 1 | EX86_BYTE_ARG | EX86_NO_REXW, SLJIT_IMM, srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm8_i8;
		return SLJIT_SUCCESS;
	}

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	if ((dst & SLJIT_MEM) && FAST_IS_REG(src)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		if (reg_map[src] >= 4) {
			SLJIT_ASSERT(dst_r == TMP_REG1);
			EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
		} else
			dst_r = src;
#else
		dst_r = src;
#endif
	}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	else if (FAST_IS_REG(src) && reg_map[src] >= 4) {
		/* Both src and dst are registers. */
		SLJIT_ASSERT(SLOW_IS_REG(dst));
		if (reg_map[dst] < 4) {
			if (dst != src)
				EMIT_MOV(compiler, dst, 0, src, 0);
			inst = emit_x86_instruction(compiler, 2, dst, 0, dst, 0);
			FAIL_IF(!inst);
			*inst++ = GROUP_0F;
			*inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8;
		}
		else {
			if (dst != src)
				EMIT_MOV(compiler, dst, 0, src, 0);
			if (sign) {
				/* shl reg, 24 */
				inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0);
				FAIL_IF(!inst);
				*inst |= SHL;
				/* sar reg, 24 */
				inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0);
				FAIL_IF(!inst);
				*inst |= SAR;
			}
			else {
				inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 0xff, dst, 0);
				FAIL_IF(!inst);
				*(inst + 1) |= AND;
			}
		}
		return SLJIT_SUCCESS;
	}
#endif
	else {
		/* src is either a memory operand or a register whose reg_map value is < 4 on x86-32. */
		inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_0F;
		*inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8;
	}

	if (dst & SLJIT_MEM) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
		if (dst_r == TMP_REG1) {
			/* Find an unused register whose reg_map value is < 4. */
			if ((dst & REG_MASK) == SLJIT_R0) {
				if ((dst & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_R1))
					work_r = SLJIT_R2;
				else
					work_r = SLJIT_R1;
			}
			else {
				if ((dst & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_R0))
					work_r = SLJIT_R0;
				else if ((dst & REG_MASK) == SLJIT_R1)
					work_r = SLJIT_R2;
				else
					work_r = SLJIT_R1;
			}

			if (work_r == SLJIT_R0) {
				ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
			}
			else {
				inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
				FAIL_IF(!inst);
				*inst = XCHG_r_rm;
			}

			inst = emit_x86_instruction(compiler, 1, work_r, 0, dst, dstw);
			FAIL_IF(!inst);
			*inst = MOV_rm8_r8;

			if (work_r == SLJIT_R0) {
				ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
			}
			else {
				inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
				FAIL_IF(!inst);
				*inst = XCHG_r_rm;
			}
		}
		else {
			inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
			FAIL_IF(!inst);
			*inst = MOV_rm8_r8;
		}
#else
		inst = emit_x86_instruction(compiler, 1 | EX86_REX | EX86_NO_REXW, dst_r, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm8_r8;
#endif
	}

	return SLJIT_SUCCESS;
}

static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	compiler->mode32 = 1;
#endif

	inst = emit_x86_instruction(compiler, 2, 0, 0, src, srcw);
	FAIL_IF(!inst);
	*inst++ = GROUP_0F;
	*inst++ = PREFETCH;

	if (op == SLJIT_PREFETCH_L1)
		*inst |= (1 << 3);
	else if (op == SLJIT_PREFETCH_L2)
		*inst |= (2 << 3);
	else if (op == SLJIT_PREFETCH_L3)
		*inst |= (3 << 3);

	return SLJIT_SUCCESS;
}
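
/* Opcode 0F 18 selects the prefetch hint through the ModRM reg field:
   /1 = prefetcht0, /2 = prefetcht1, /3 = prefetcht2, and /0 = prefetchnta
   (used when none of the L1/L2/L3 hints above is requested). */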

static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;
	sljit_s32 dst_r;

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
	compiler->mode32 = 0;
#endif

	if (src & SLJIT_IMM) {
		if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
			return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
#else
			inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
			FAIL_IF(!inst);
			*inst = MOV_rm_i32;
			return SLJIT_SUCCESS;
#endif
		}
		inst = emit_x86_instruction(compiler, 1 | EX86_HALF_ARG | EX86_NO_REXW | EX86_PREF_66, SLJIT_IMM, srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_i32;
		return SLJIT_SUCCESS;
	}

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	if ((dst & SLJIT_MEM) && FAST_IS_REG(src))
		dst_r = src;
	else {
		inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_0F;
		*inst = sign ? MOVSX_r_rm16 : MOVZX_r_rm16;
	}

	if (dst & SLJIT_MEM) {
		inst = emit_x86_instruction(compiler, 1 | EX86_NO_REXW | EX86_PREF_66, dst_r, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_r;
	}

	return SLJIT_SUCCESS;
}

static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;

	if (dst == src && dstw == srcw) {
		/* Same input and output */
		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst++ = GROUP_F7;
		*inst |= opcode;
		return SLJIT_SUCCESS;
	}

	if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
		dst = TMP_REG1;

	if (FAST_IS_REG(dst)) {
		EMIT_MOV(compiler, dst, 0, src, srcw);
		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
		FAIL_IF(!inst);
		*inst++ = GROUP_F7;
		*inst |= opcode;
		return SLJIT_SUCCESS;
	}

	EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
	inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
	FAIL_IF(!inst);
	*inst++ = GROUP_F7;
	*inst |= opcode;
	EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
	return SLJIT_SUCCESS;
}

static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;

	if (dst == SLJIT_UNUSED)
		dst = TMP_REG1;

	if (FAST_IS_REG(dst)) {
		EMIT_MOV(compiler, dst, 0, src, srcw);
		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
		FAIL_IF(!inst);
		*inst++ = GROUP_F7;
		*inst |= NOT_rm;
		inst = emit_x86_instruction(compiler, 1, dst, 0, dst, 0);
		FAIL_IF(!inst);
		*inst = OR_r_rm;
		return SLJIT_SUCCESS;
	}

	EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
	inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
	FAIL_IF(!inst);
	*inst++ = GROUP_F7;
	*inst |= NOT_rm;
	inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG1, 0);
	FAIL_IF(!inst);
	*inst = OR_r_rm;
	EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
	return SLJIT_SUCCESS;
}

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
static const sljit_sw emit_clz_arg = 32 + 31;
#endif

static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8* inst;
	sljit_s32 dst_r;

	SLJIT_UNUSED_ARG(op_flags);

	if (cpu_has_cmov == -1)
		get_cpu_features();

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
	FAIL_IF(!inst);
	*inst++ = GROUP_0F;
	*inst = BSR_r_rm;

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
	if (cpu_has_cmov) {
		if (dst_r != TMP_REG1) {
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31);
			inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
		}
		else
			inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg);

		FAIL_IF(!inst);
		*inst++ = GROUP_0F;
		*inst = CMOVE_r_rm;
	}
	else
		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31));

	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
#else
	if (cpu_has_cmov) {
		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31));
		inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
		FAIL_IF(!inst);
		*inst++ = GROUP_0F;
		*inst = CMOVE_r_rm;
	}
	else
		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31)));

	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 63 : 31, dst_r, 0);
#endif

	FAIL_IF(!inst);
	*(inst + 1) |= XOR;

	if (dst & SLJIT_MEM)
		EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
	return SLJIT_SUCCESS;
}
  1263. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
  1264. sljit_s32 dst, sljit_sw dstw,
  1265. sljit_s32 src, sljit_sw srcw)
  1266. {
  1267. sljit_s32 op_flags = GET_ALL_FLAGS(op);
  1268. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1269. sljit_s32 dst_is_ereg = 0;
  1270. #endif
  1271. CHECK_ERROR();
  1272. CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
  1273. ADJUST_LOCAL_OFFSET(dst, dstw);
  1274. ADJUST_LOCAL_OFFSET(src, srcw);
  1275. CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1);
  1276. CHECK_EXTRA_REGS(src, srcw, (void)0);
  1277. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1278. compiler->mode32 = op_flags & SLJIT_I32_OP;
  1279. #endif
  1280. op = GET_OPCODE(op);
  1281. if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
  1282. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1283. compiler->mode32 = 0;
  1284. #endif
  1285. if (FAST_IS_REG(src) && src == dst) {
  1286. if (!TYPE_CAST_NEEDED(op))
  1287. return SLJIT_SUCCESS;
  1288. }
  1289. if (op_flags & SLJIT_I32_OP) {
  1290. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1291. if (src & SLJIT_MEM) {
  1292. if (op == SLJIT_MOV_S32)
  1293. op = SLJIT_MOV_U32;
  1294. }
  1295. else if (src & SLJIT_IMM) {
  1296. if (op == SLJIT_MOV_U32)
  1297. op = SLJIT_MOV_S32;
  1298. }
  1299. #endif
  1300. }
  1301. if (src & SLJIT_IMM) {
  1302. switch (op) {
  1303. case SLJIT_MOV_U8:
  1304. srcw = (sljit_u8)srcw;
  1305. break;
  1306. case SLJIT_MOV_S8:
  1307. srcw = (sljit_s8)srcw;
  1308. break;
  1309. case SLJIT_MOV_U16:
  1310. srcw = (sljit_u16)srcw;
  1311. break;
  1312. case SLJIT_MOV_S16:
  1313. srcw = (sljit_s16)srcw;
  1314. break;
  1315. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1316. case SLJIT_MOV_U32:
  1317. srcw = (sljit_u32)srcw;
  1318. break;
  1319. case SLJIT_MOV_S32:
  1320. srcw = (sljit_s32)srcw;
  1321. break;
  1322. #endif
  1323. }
  1324. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1325. if (SLJIT_UNLIKELY(dst_is_ereg))
  1326. return emit_mov(compiler, dst, dstw, src, srcw);
  1327. #endif
  1328. }
  1329. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1330. if (SLJIT_UNLIKELY(dst_is_ereg) && (!(op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P) || (src & SLJIT_MEM))) {
  1331. SLJIT_ASSERT(dst == SLJIT_MEM1(SLJIT_SP));
  1332. dst = TMP_REG1;
  1333. }
  1334. #endif
  1335. switch (op) {
  1336. case SLJIT_MOV:
  1337. case SLJIT_MOV_P:
  1338. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1339. case SLJIT_MOV_U32:
  1340. case SLJIT_MOV_S32:
  1341. #endif
  1342. FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
  1343. break;
  1344. case SLJIT_MOV_U8:
  1345. FAIL_IF(emit_mov_byte(compiler, 0, dst, dstw, src, srcw));
  1346. break;
  1347. case SLJIT_MOV_S8:
  1348. FAIL_IF(emit_mov_byte(compiler, 1, dst, dstw, src, srcw));
  1349. break;
  1350. case SLJIT_MOV_U16:
  1351. FAIL_IF(emit_mov_half(compiler, 0, dst, dstw, src, srcw));
  1352. break;
  1353. case SLJIT_MOV_S16:
  1354. FAIL_IF(emit_mov_half(compiler, 1, dst, dstw, src, srcw));
  1355. break;
  1356. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1357. case SLJIT_MOV_U32:
  1358. FAIL_IF(emit_mov_int(compiler, 0, dst, dstw, src, srcw));
  1359. break;
  1360. case SLJIT_MOV_S32:
  1361. FAIL_IF(emit_mov_int(compiler, 1, dst, dstw, src, srcw));
  1362. break;
  1363. #endif
  1364. }
  1365. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1366. if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REG1)
  1367. return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), dstw, TMP_REG1, 0);
  1368. #endif
  1369. return SLJIT_SUCCESS;
  1370. }
  1371. switch (op) {
  1372. case SLJIT_NOT:
  1373. if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_Z))
  1374. return emit_not_with_flags(compiler, dst, dstw, src, srcw);
  1375. return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw);
  1376. case SLJIT_NEG:
  1377. return emit_unary(compiler, NEG_rm, dst, dstw, src, srcw);
  1378. case SLJIT_CLZ:
  1379. return emit_clz(compiler, op_flags, dst, dstw, src, srcw);
  1380. }
  1381. return SLJIT_SUCCESS;
  1382. }
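/* Helper macros for ALU instructions with an immediate operand. BINARY_IMM
   emits the immediate form of the instruction, placing the opcode extension
   (op_imm) into the ModRM reg field; on x86-64 an immediate that does not fit
   into a signed 32-bit value is first loaded into a temporary register and
   the register form (op_mr) is used instead. BINARY_EAX_IMM uses the shorter
   eax/rax-specific encoding. */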
  1383. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1384. #define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
  1385. if (IS_HALFWORD(immw) || compiler->mode32) { \
  1386. inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
  1387. FAIL_IF(!inst); \
  1388. *(inst + 1) |= (op_imm); \
  1389. } \
  1390. else { \
  1391. FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \
  1392. inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \
  1393. FAIL_IF(!inst); \
  1394. *inst = (op_mr); \
  1395. }
  1396. #define BINARY_EAX_IMM(op_eax_imm, immw) \
  1397. FAIL_IF(emit_do_imm32(compiler, (!compiler->mode32) ? REX_W : 0, (op_eax_imm), immw))
  1398. #else
  1399. #define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
  1400. inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
  1401. FAIL_IF(!inst); \
  1402. *(inst + 1) |= (op_imm);
  1403. #define BINARY_EAX_IMM(op_eax_imm, immw) \
  1404. FAIL_IF(emit_do_imm(compiler, (op_eax_imm), immw))
  1405. #endif
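/* Emits a commutative binary operation (ADD, ADC, AND, OR, XOR). op_types
   packs the four related opcodes into one word: the eax-immediate short form
   in bits 24-31, the reg,rm form in bits 16-23, the rm,reg form in bits 8-15
   and the immediate opcode extension in bits 0-7. Since the operation is
   commutative, the dst == src2 case can simply apply src1 to dst. */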
  1406. static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
  1407. sljit_u32 op_types,
  1408. sljit_s32 dst, sljit_sw dstw,
  1409. sljit_s32 src1, sljit_sw src1w,
  1410. sljit_s32 src2, sljit_sw src2w)
  1411. {
  1412. sljit_u8* inst;
  1413. sljit_u8 op_eax_imm = (op_types >> 24);
  1414. sljit_u8 op_rm = (op_types >> 16) & 0xff;
  1415. sljit_u8 op_mr = (op_types >> 8) & 0xff;
  1416. sljit_u8 op_imm = op_types & 0xff;
  1417. if (dst == SLJIT_UNUSED) {
  1418. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1419. if (src2 & SLJIT_IMM) {
  1420. BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
  1421. }
  1422. else {
  1423. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1424. FAIL_IF(!inst);
  1425. *inst = op_rm;
  1426. }
  1427. return SLJIT_SUCCESS;
  1428. }
  1429. if (dst == src1 && dstw == src1w) {
  1430. if (src2 & SLJIT_IMM) {
  1431. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1432. if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
  1433. #else
  1434. if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128)) {
  1435. #endif
  1436. BINARY_EAX_IMM(op_eax_imm, src2w);
  1437. }
  1438. else {
  1439. BINARY_IMM(op_imm, op_mr, src2w, dst, dstw);
  1440. }
  1441. }
  1442. else if (FAST_IS_REG(dst)) {
  1443. inst = emit_x86_instruction(compiler, 1, dst, dstw, src2, src2w);
  1444. FAIL_IF(!inst);
  1445. *inst = op_rm;
  1446. }
  1447. else if (FAST_IS_REG(src2)) {
  1448. /* Special exception for sljit_emit_op_flags. */
  1449. inst = emit_x86_instruction(compiler, 1, src2, src2w, dst, dstw);
  1450. FAIL_IF(!inst);
  1451. *inst = op_mr;
  1452. }
  1453. else {
  1454. EMIT_MOV(compiler, TMP_REG1, 0, src2, src2w);
  1455. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw);
  1456. FAIL_IF(!inst);
  1457. *inst = op_mr;
  1458. }
  1459. return SLJIT_SUCCESS;
  1460. }
  1461. /* Only for cumulative operations. */
  1462. if (dst == src2 && dstw == src2w) {
  1463. if (src1 & SLJIT_IMM) {
  1464. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1465. if ((dst == SLJIT_R0) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
  1466. #else
  1467. if ((dst == SLJIT_R0) && (src1w > 127 || src1w < -128)) {
  1468. #endif
  1469. BINARY_EAX_IMM(op_eax_imm, src1w);
  1470. }
  1471. else {
  1472. BINARY_IMM(op_imm, op_mr, src1w, dst, dstw);
  1473. }
  1474. }
  1475. else if (FAST_IS_REG(dst)) {
  1476. inst = emit_x86_instruction(compiler, 1, dst, dstw, src1, src1w);
  1477. FAIL_IF(!inst);
  1478. *inst = op_rm;
  1479. }
  1480. else if (FAST_IS_REG(src1)) {
  1481. inst = emit_x86_instruction(compiler, 1, src1, src1w, dst, dstw);
  1482. FAIL_IF(!inst);
  1483. *inst = op_mr;
  1484. }
  1485. else {
  1486. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1487. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw);
  1488. FAIL_IF(!inst);
  1489. *inst = op_mr;
  1490. }
  1491. return SLJIT_SUCCESS;
  1492. }
  1493. /* General version. */
  1494. if (FAST_IS_REG(dst)) {
  1495. EMIT_MOV(compiler, dst, 0, src1, src1w);
  1496. if (src2 & SLJIT_IMM) {
  1497. BINARY_IMM(op_imm, op_mr, src2w, dst, 0);
  1498. }
  1499. else {
  1500. inst = emit_x86_instruction(compiler, 1, dst, 0, src2, src2w);
  1501. FAIL_IF(!inst);
  1502. *inst = op_rm;
  1503. }
  1504. }
  1505. else {
/* This version requires fewer memory writes. */
  1507. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1508. if (src2 & SLJIT_IMM) {
  1509. BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
  1510. }
  1511. else {
  1512. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1513. FAIL_IF(!inst);
  1514. *inst = op_rm;
  1515. }
  1516. EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
  1517. }
  1518. return SLJIT_SUCCESS;
  1519. }
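/* Same as emit_cum_binary, but for non-commutative operations (SUB, SBB):
   the operands cannot be swapped, so when dst aliases src2 the result is
   computed in TMP_REG1 and moved to dst afterwards. */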
  1520. static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
  1521. sljit_u32 op_types,
  1522. sljit_s32 dst, sljit_sw dstw,
  1523. sljit_s32 src1, sljit_sw src1w,
  1524. sljit_s32 src2, sljit_sw src2w)
  1525. {
  1526. sljit_u8* inst;
  1527. sljit_u8 op_eax_imm = (op_types >> 24);
  1528. sljit_u8 op_rm = (op_types >> 16) & 0xff;
  1529. sljit_u8 op_mr = (op_types >> 8) & 0xff;
  1530. sljit_u8 op_imm = op_types & 0xff;
  1531. if (dst == SLJIT_UNUSED) {
  1532. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1533. if (src2 & SLJIT_IMM) {
  1534. BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
  1535. }
  1536. else {
  1537. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1538. FAIL_IF(!inst);
  1539. *inst = op_rm;
  1540. }
  1541. return SLJIT_SUCCESS;
  1542. }
  1543. if (dst == src1 && dstw == src1w) {
  1544. if (src2 & SLJIT_IMM) {
  1545. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1546. if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
  1547. #else
  1548. if ((dst == SLJIT_R0) && (src2w > 127 || src2w < -128)) {
  1549. #endif
  1550. BINARY_EAX_IMM(op_eax_imm, src2w);
  1551. }
  1552. else {
  1553. BINARY_IMM(op_imm, op_mr, src2w, dst, dstw);
  1554. }
  1555. }
  1556. else if (FAST_IS_REG(dst)) {
  1557. inst = emit_x86_instruction(compiler, 1, dst, dstw, src2, src2w);
  1558. FAIL_IF(!inst);
  1559. *inst = op_rm;
  1560. }
  1561. else if (FAST_IS_REG(src2)) {
  1562. inst = emit_x86_instruction(compiler, 1, src2, src2w, dst, dstw);
  1563. FAIL_IF(!inst);
  1564. *inst = op_mr;
  1565. }
  1566. else {
  1567. EMIT_MOV(compiler, TMP_REG1, 0, src2, src2w);
  1568. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw);
  1569. FAIL_IF(!inst);
  1570. *inst = op_mr;
  1571. }
  1572. return SLJIT_SUCCESS;
  1573. }
  1574. /* General version. */
  1575. if (FAST_IS_REG(dst) && dst != src2) {
  1576. EMIT_MOV(compiler, dst, 0, src1, src1w);
  1577. if (src2 & SLJIT_IMM) {
  1578. BINARY_IMM(op_imm, op_mr, src2w, dst, 0);
  1579. }
  1580. else {
  1581. inst = emit_x86_instruction(compiler, 1, dst, 0, src2, src2w);
  1582. FAIL_IF(!inst);
  1583. *inst = op_rm;
  1584. }
  1585. }
  1586. else {
/* This version requires fewer memory writes. */
  1588. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1589. if (src2 & SLJIT_IMM) {
  1590. BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
  1591. }
  1592. else {
  1593. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1594. FAIL_IF(!inst);
  1595. *inst = op_rm;
  1596. }
  1597. EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
  1598. }
  1599. return SLJIT_SUCCESS;
  1600. }
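/* Signed multiplication using the two-operand IMUL r, r/m form, or the
   IMUL r, r/m, imm8 / imm32 forms when one source is an immediate. On x86-64
   an immediate that does not fit into 32 bits is loaded into TMP_REG2 first. */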
  1601. static sljit_s32 emit_mul(struct sljit_compiler *compiler,
  1602. sljit_s32 dst, sljit_sw dstw,
  1603. sljit_s32 src1, sljit_sw src1w,
  1604. sljit_s32 src2, sljit_sw src2w)
  1605. {
  1606. sljit_u8* inst;
  1607. sljit_s32 dst_r;
  1608. dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
  1609. /* Register destination. */
  1610. if (dst_r == src1 && !(src2 & SLJIT_IMM)) {
  1611. inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w);
  1612. FAIL_IF(!inst);
  1613. *inst++ = GROUP_0F;
  1614. *inst = IMUL_r_rm;
  1615. }
  1616. else if (dst_r == src2 && !(src1 & SLJIT_IMM)) {
  1617. inst = emit_x86_instruction(compiler, 2, dst_r, 0, src1, src1w);
  1618. FAIL_IF(!inst);
  1619. *inst++ = GROUP_0F;
  1620. *inst = IMUL_r_rm;
  1621. }
  1622. else if (src1 & SLJIT_IMM) {
  1623. if (src2 & SLJIT_IMM) {
  1624. EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, src2w);
  1625. src2 = dst_r;
  1626. src2w = 0;
  1627. }
  1628. if (src1w <= 127 && src1w >= -128) {
  1629. inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w);
  1630. FAIL_IF(!inst);
  1631. *inst = IMUL_r_rm_i8;
  1632. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
  1633. FAIL_IF(!inst);
  1634. INC_SIZE(1);
  1635. *inst = (sljit_s8)src1w;
  1636. }
  1637. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1638. else {
  1639. inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w);
  1640. FAIL_IF(!inst);
  1641. *inst = IMUL_r_rm_i32;
  1642. inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
  1643. FAIL_IF(!inst);
  1644. INC_SIZE(4);
  1645. sljit_unaligned_store_sw(inst, src1w);
  1646. }
  1647. #else
  1648. else if (IS_HALFWORD(src1w)) {
  1649. inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w);
  1650. FAIL_IF(!inst);
  1651. *inst = IMUL_r_rm_i32;
  1652. inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
  1653. FAIL_IF(!inst);
  1654. INC_SIZE(4);
  1655. sljit_unaligned_store_s32(inst, (sljit_s32)src1w);
  1656. }
  1657. else {
  1658. if (dst_r != src2)
  1659. EMIT_MOV(compiler, dst_r, 0, src2, src2w);
  1660. FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w));
  1661. inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
  1662. FAIL_IF(!inst);
  1663. *inst++ = GROUP_0F;
  1664. *inst = IMUL_r_rm;
  1665. }
  1666. #endif
  1667. }
  1668. else if (src2 & SLJIT_IMM) {
  1669. /* Note: src1 is NOT immediate. */
  1670. if (src2w <= 127 && src2w >= -128) {
  1671. inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w);
  1672. FAIL_IF(!inst);
  1673. *inst = IMUL_r_rm_i8;
  1674. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
  1675. FAIL_IF(!inst);
  1676. INC_SIZE(1);
  1677. *inst = (sljit_s8)src2w;
  1678. }
  1679. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1680. else {
  1681. inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w);
  1682. FAIL_IF(!inst);
  1683. *inst = IMUL_r_rm_i32;
  1684. inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
  1685. FAIL_IF(!inst);
  1686. INC_SIZE(4);
  1687. sljit_unaligned_store_sw(inst, src2w);
  1688. }
  1689. #else
  1690. else if (IS_HALFWORD(src2w)) {
  1691. inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w);
  1692. FAIL_IF(!inst);
  1693. *inst = IMUL_r_rm_i32;
  1694. inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
  1695. FAIL_IF(!inst);
  1696. INC_SIZE(4);
  1697. sljit_unaligned_store_s32(inst, (sljit_s32)src2w);
  1698. }
  1699. else {
  1700. if (dst_r != src1)
  1701. EMIT_MOV(compiler, dst_r, 0, src1, src1w);
  1702. FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w));
  1703. inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
  1704. FAIL_IF(!inst);
  1705. *inst++ = GROUP_0F;
  1706. *inst = IMUL_r_rm;
  1707. }
  1708. #endif
  1709. }
  1710. else {
  1711. /* Neither argument is immediate. */
  1712. if (ADDRESSING_DEPENDS_ON(src2, dst_r))
  1713. dst_r = TMP_REG1;
  1714. EMIT_MOV(compiler, dst_r, 0, src1, src1w);
  1715. inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w);
  1716. FAIL_IF(!inst);
  1717. *inst++ = GROUP_0F;
  1718. *inst = IMUL_r_rm;
  1719. }
  1720. if (dst & SLJIT_MEM)
  1721. EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
  1722. return SLJIT_SUCCESS;
  1723. }
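/* Tries to implement an addition that does not need the flags with a single
   LEA instruction. Returns SLJIT_ERR_UNSUPPORTED when no suitable addressing
   form exists, in which case the caller falls back to the generic path. */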
  1724. static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler,
  1725. sljit_s32 dst, sljit_sw dstw,
  1726. sljit_s32 src1, sljit_sw src1w,
  1727. sljit_s32 src2, sljit_sw src2w)
  1728. {
  1729. sljit_u8* inst;
  1730. sljit_s32 dst_r, done = 0;
/* These cases are better handled by the normal code path. */
  1732. if (dst == src1 && dstw == src1w)
  1733. return SLJIT_ERR_UNSUPPORTED;
  1734. if (dst == src2 && dstw == src2w)
  1735. return SLJIT_ERR_UNSUPPORTED;
  1736. dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  1737. if (FAST_IS_REG(src1)) {
  1738. if (FAST_IS_REG(src2)) {
  1739. inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM2(src1, src2), 0);
  1740. FAIL_IF(!inst);
  1741. *inst = LEA_r_m;
  1742. done = 1;
  1743. }
  1744. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1745. if ((src2 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src2w))) {
  1746. inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), (sljit_s32)src2w);
  1747. #else
  1748. if (src2 & SLJIT_IMM) {
  1749. inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), src2w);
  1750. #endif
  1751. FAIL_IF(!inst);
  1752. *inst = LEA_r_m;
  1753. done = 1;
  1754. }
  1755. }
  1756. else if (FAST_IS_REG(src2)) {
  1757. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1758. if ((src1 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src1w))) {
  1759. inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), (sljit_s32)src1w);
  1760. #else
  1761. if (src1 & SLJIT_IMM) {
  1762. inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), src1w);
  1763. #endif
  1764. FAIL_IF(!inst);
  1765. *inst = LEA_r_m;
  1766. done = 1;
  1767. }
  1768. }
  1769. if (done) {
  1770. if (dst_r == TMP_REG1)
  1771. return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
  1772. return SLJIT_SUCCESS;
  1773. }
  1774. return SLJIT_ERR_UNSUPPORTED;
  1775. }
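/* Emits a CMP: only the flags are produced, no result is stored. Used when
   the destination of a SUB is unused but its flags are requested. */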
  1776. static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler,
  1777. sljit_s32 src1, sljit_sw src1w,
  1778. sljit_s32 src2, sljit_sw src2w)
  1779. {
  1780. sljit_u8* inst;
  1781. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1782. if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
  1783. #else
  1784. if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) {
  1785. #endif
  1786. BINARY_EAX_IMM(CMP_EAX_i32, src2w);
  1787. return SLJIT_SUCCESS;
  1788. }
  1789. if (FAST_IS_REG(src1)) {
  1790. if (src2 & SLJIT_IMM) {
  1791. BINARY_IMM(CMP, CMP_rm_r, src2w, src1, 0);
  1792. }
  1793. else {
  1794. inst = emit_x86_instruction(compiler, 1, src1, 0, src2, src2w);
  1795. FAIL_IF(!inst);
  1796. *inst = CMP_r_rm;
  1797. }
  1798. return SLJIT_SUCCESS;
  1799. }
  1800. if (FAST_IS_REG(src2) && !(src1 & SLJIT_IMM)) {
  1801. inst = emit_x86_instruction(compiler, 1, src2, 0, src1, src1w);
  1802. FAIL_IF(!inst);
  1803. *inst = CMP_rm_r;
  1804. return SLJIT_SUCCESS;
  1805. }
  1806. if (src2 & SLJIT_IMM) {
  1807. if (src1 & SLJIT_IMM) {
  1808. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1809. src1 = TMP_REG1;
  1810. src1w = 0;
  1811. }
  1812. BINARY_IMM(CMP, CMP_rm_r, src2w, src1, src1w);
  1813. }
  1814. else {
  1815. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1816. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1817. FAIL_IF(!inst);
  1818. *inst = CMP_r_rm;
  1819. }
  1820. return SLJIT_SUCCESS;
  1821. }
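/* Emits a TEST: the AND result itself is discarded and only the flags are
   set. Used when the destination of an AND is unused. */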
  1822. static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
  1823. sljit_s32 src1, sljit_sw src1w,
  1824. sljit_s32 src2, sljit_sw src2w)
  1825. {
  1826. sljit_u8* inst;
  1827. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1828. if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) {
  1829. #else
  1830. if (src1 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) {
  1831. #endif
  1832. BINARY_EAX_IMM(TEST_EAX_i32, src2w);
  1833. return SLJIT_SUCCESS;
  1834. }
  1835. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1836. if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
  1837. #else
  1838. if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128)) {
  1839. #endif
  1840. BINARY_EAX_IMM(TEST_EAX_i32, src1w);
  1841. return SLJIT_SUCCESS;
  1842. }
  1843. if (!(src1 & SLJIT_IMM)) {
  1844. if (src2 & SLJIT_IMM) {
  1845. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1846. if (IS_HALFWORD(src2w) || compiler->mode32) {
  1847. inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, src1w);
  1848. FAIL_IF(!inst);
  1849. *inst = GROUP_F7;
  1850. }
  1851. else {
  1852. FAIL_IF(emit_load_imm64(compiler, TMP_REG1, src2w));
  1853. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src1, src1w);
  1854. FAIL_IF(!inst);
  1855. *inst = TEST_rm_r;
  1856. }
  1857. #else
  1858. inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, src1w);
  1859. FAIL_IF(!inst);
  1860. *inst = GROUP_F7;
  1861. #endif
  1862. return SLJIT_SUCCESS;
  1863. }
  1864. else if (FAST_IS_REG(src1)) {
  1865. inst = emit_x86_instruction(compiler, 1, src1, 0, src2, src2w);
  1866. FAIL_IF(!inst);
  1867. *inst = TEST_rm_r;
  1868. return SLJIT_SUCCESS;
  1869. }
  1870. }
  1871. if (!(src2 & SLJIT_IMM)) {
  1872. if (src1 & SLJIT_IMM) {
  1873. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1874. if (IS_HALFWORD(src1w) || compiler->mode32) {
  1875. inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src1w, src2, src2w);
  1876. FAIL_IF(!inst);
  1877. *inst = GROUP_F7;
  1878. }
  1879. else {
  1880. FAIL_IF(emit_load_imm64(compiler, TMP_REG1, src1w));
  1881. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1882. FAIL_IF(!inst);
  1883. *inst = TEST_rm_r;
  1884. }
  1885. #else
  1886. inst = emit_x86_instruction(compiler, 1, src1, src1w, src2, src2w);
  1887. FAIL_IF(!inst);
  1888. *inst = GROUP_F7;
  1889. #endif
  1890. return SLJIT_SUCCESS;
  1891. }
  1892. else if (FAST_IS_REG(src2)) {
  1893. inst = emit_x86_instruction(compiler, 1, src2, 0, src1, src1w);
  1894. FAIL_IF(!inst);
  1895. *inst = TEST_rm_r;
  1896. return SLJIT_SUCCESS;
  1897. }
  1898. }
  1899. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1900. if (src2 & SLJIT_IMM) {
  1901. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  1902. if (IS_HALFWORD(src2w) || compiler->mode32) {
  1903. inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REG1, 0);
  1904. FAIL_IF(!inst);
  1905. *inst = GROUP_F7;
  1906. }
  1907. else {
  1908. FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w));
  1909. inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, TMP_REG1, 0);
  1910. FAIL_IF(!inst);
  1911. *inst = TEST_rm_r;
  1912. }
  1913. #else
  1914. inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REG1, 0);
  1915. FAIL_IF(!inst);
  1916. *inst = GROUP_F7;
  1917. #endif
  1918. }
  1919. else {
  1920. inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
  1921. FAIL_IF(!inst);
  1922. *inst = TEST_rm_r;
  1923. }
  1924. return SLJIT_SUCCESS;
  1925. }
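/* Emits a shift or rotate; the opcode extension is passed in 'mode'. A
   variable shift count must be in cl (SLJIT_PREF_SHIFT_REG) on x86, so the
   non-immediate cases below shuffle registers to place the count into ecx
   and restore its previous value afterwards. */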
  1926. static sljit_s32 emit_shift(struct sljit_compiler *compiler,
  1927. sljit_u8 mode,
  1928. sljit_s32 dst, sljit_sw dstw,
  1929. sljit_s32 src1, sljit_sw src1w,
  1930. sljit_s32 src2, sljit_sw src2w)
  1931. {
  1932. sljit_u8* inst;
  1933. if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) {
  1934. if (dst == src1 && dstw == src1w) {
  1935. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, dstw);
  1936. FAIL_IF(!inst);
  1937. *inst |= mode;
  1938. return SLJIT_SUCCESS;
  1939. }
  1940. if (dst == SLJIT_UNUSED) {
  1941. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1942. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0);
  1943. FAIL_IF(!inst);
  1944. *inst |= mode;
  1945. return SLJIT_SUCCESS;
  1946. }
  1947. if (dst == SLJIT_PREF_SHIFT_REG && src2 == SLJIT_PREF_SHIFT_REG) {
  1948. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1949. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  1950. FAIL_IF(!inst);
  1951. *inst |= mode;
  1952. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  1953. return SLJIT_SUCCESS;
  1954. }
  1955. if (FAST_IS_REG(dst)) {
  1956. EMIT_MOV(compiler, dst, 0, src1, src1w);
  1957. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, 0);
  1958. FAIL_IF(!inst);
  1959. *inst |= mode;
  1960. return SLJIT_SUCCESS;
  1961. }
  1962. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1963. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0);
  1964. FAIL_IF(!inst);
  1965. *inst |= mode;
  1966. EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
  1967. return SLJIT_SUCCESS;
  1968. }
  1969. if (dst == SLJIT_PREF_SHIFT_REG) {
  1970. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1971. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
  1972. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  1973. FAIL_IF(!inst);
  1974. *inst |= mode;
  1975. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  1976. }
  1977. else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
  1978. if (src1 != dst)
  1979. EMIT_MOV(compiler, dst, 0, src1, src1w);
  1980. EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
  1981. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
  1982. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0);
  1983. FAIL_IF(!inst);
  1984. *inst |= mode;
  1985. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  1986. }
  1987. else {
/* This case is complex since ecx itself may be used for
   addressing, and that use must keep working as well. */
  1990. EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
  1991. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  1992. EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
  1993. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
  1994. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  1995. FAIL_IF(!inst);
  1996. *inst |= mode;
  1997. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
  1998. #else
  1999. EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
  2000. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
  2001. inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
  2002. FAIL_IF(!inst);
  2003. *inst |= mode;
  2004. EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
  2005. #endif
  2006. if (dst != SLJIT_UNUSED)
  2007. return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
  2008. }
  2009. return SLJIT_SUCCESS;
  2010. }
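/* Shift instructions leave the flags unchanged when the count is zero. A
   zero immediate count therefore becomes a plain move, or an OR with 0 when
   the Z flag is requested. For a variable count with flags requested, the
   result (or the source, when the result is not kept in a register) is
   compared explicitly against zero. */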
  2011. static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
  2012. sljit_u8 mode, sljit_s32 set_flags,
  2013. sljit_s32 dst, sljit_sw dstw,
  2014. sljit_s32 src1, sljit_sw src1w,
  2015. sljit_s32 src2, sljit_sw src2w)
  2016. {
  2017. /* The CPU does not set flags if the shift count is 0. */
  2018. if (src2 & SLJIT_IMM) {
  2019. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2020. if ((src2w & 0x3f) != 0 || (compiler->mode32 && (src2w & 0x1f) != 0))
  2021. return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
  2022. #else
  2023. if ((src2w & 0x1f) != 0)
  2024. return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
  2025. #endif
  2026. if (!set_flags)
  2027. return emit_mov(compiler, dst, dstw, src1, src1w);
  2028. /* OR dst, src, 0 */
  2029. return emit_cum_binary(compiler, BINARY_OPCODE(OR),
  2030. dst, dstw, src1, src1w, SLJIT_IMM, 0);
  2031. }
  2032. if (!set_flags)
  2033. return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
  2034. if (!FAST_IS_REG(dst))
  2035. FAIL_IF(emit_cmp_binary(compiler, src1, src1w, SLJIT_IMM, 0));
  2036. FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w));
  2037. if (FAST_IS_REG(dst))
  2038. return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0);
  2039. return SLJIT_SUCCESS;
  2040. }
  2041. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
  2042. sljit_s32 dst, sljit_sw dstw,
  2043. sljit_s32 src1, sljit_sw src1w,
  2044. sljit_s32 src2, sljit_sw src2w)
  2045. {
  2046. CHECK_ERROR();
  2047. CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
  2048. ADJUST_LOCAL_OFFSET(dst, dstw);
  2049. ADJUST_LOCAL_OFFSET(src1, src1w);
  2050. ADJUST_LOCAL_OFFSET(src2, src2w);
  2051. CHECK_EXTRA_REGS(dst, dstw, (void)0);
  2052. CHECK_EXTRA_REGS(src1, src1w, (void)0);
  2053. CHECK_EXTRA_REGS(src2, src2w, (void)0);
  2054. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2055. compiler->mode32 = op & SLJIT_I32_OP;
  2056. #endif
  2057. if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
  2058. return SLJIT_SUCCESS;
  2059. switch (GET_OPCODE(op)) {
  2060. case SLJIT_ADD:
  2061. if (!HAS_FLAGS(op)) {
  2062. if (emit_lea_binary(compiler, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED)
  2063. return compiler->error;
  2064. }
  2065. return emit_cum_binary(compiler, BINARY_OPCODE(ADD),
  2066. dst, dstw, src1, src1w, src2, src2w);
  2067. case SLJIT_ADDC:
  2068. return emit_cum_binary(compiler, BINARY_OPCODE(ADC),
  2069. dst, dstw, src1, src1w, src2, src2w);
  2070. case SLJIT_SUB:
  2071. if (!HAS_FLAGS(op)) {
  2072. if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
  2073. return compiler->error;
  2074. if (SLOW_IS_REG(dst) && src2 == dst) {
  2075. FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), dst, 0, dst, 0, src1, src1w));
  2076. return emit_unary(compiler, NEG_rm, dst, 0, dst, 0);
  2077. }
  2078. }
  2079. if (dst == SLJIT_UNUSED)
  2080. return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
  2081. return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
  2082. dst, dstw, src1, src1w, src2, src2w);
  2083. case SLJIT_SUBC:
  2084. return emit_non_cum_binary(compiler, BINARY_OPCODE(SBB),
  2085. dst, dstw, src1, src1w, src2, src2w);
  2086. case SLJIT_MUL:
  2087. return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w);
  2088. case SLJIT_AND:
  2089. if (dst == SLJIT_UNUSED)
  2090. return emit_test_binary(compiler, src1, src1w, src2, src2w);
  2091. return emit_cum_binary(compiler, BINARY_OPCODE(AND),
  2092. dst, dstw, src1, src1w, src2, src2w);
  2093. case SLJIT_OR:
  2094. return emit_cum_binary(compiler, BINARY_OPCODE(OR),
  2095. dst, dstw, src1, src1w, src2, src2w);
  2096. case SLJIT_XOR:
  2097. return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
  2098. dst, dstw, src1, src1w, src2, src2w);
  2099. case SLJIT_SHL:
  2100. return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
  2101. dst, dstw, src1, src1w, src2, src2w);
  2102. case SLJIT_LSHR:
  2103. return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op),
  2104. dst, dstw, src1, src1w, src2, src2w);
  2105. case SLJIT_ASHR:
  2106. return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op),
  2107. dst, dstw, src1, src1w, src2, src2w);
  2108. }
  2109. return SLJIT_SUCCESS;
  2110. }
  2111. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
  2112. sljit_s32 src, sljit_sw srcw)
  2113. {
  2114. CHECK_ERROR();
  2115. CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
  2116. ADJUST_LOCAL_OFFSET(src, srcw);
  2117. CHECK_EXTRA_REGS(src, srcw, (void)0);
  2118. switch (op) {
  2119. case SLJIT_FAST_RETURN:
  2120. return emit_fast_return(compiler, src, srcw);
  2121. case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
  2122. /* Don't adjust shadow stack if it isn't enabled. */
  2123. if (!cpu_has_shadow_stack ())
  2124. return SLJIT_SUCCESS;
  2125. return adjust_shadow_stack(compiler, src, srcw, SLJIT_UNUSED, 0);
  2126. case SLJIT_PREFETCH_L1:
  2127. case SLJIT_PREFETCH_L2:
  2128. case SLJIT_PREFETCH_L3:
  2129. case SLJIT_PREFETCH_ONCE:
  2130. return emit_prefetch(compiler, op, src, srcw);
  2131. }
  2132. return SLJIT_SUCCESS;
  2133. }
  2134. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
  2135. {
  2136. CHECK_REG_INDEX(check_sljit_get_register_index(reg));
  2137. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  2138. if (reg >= SLJIT_R3 && reg <= SLJIT_R8)
  2139. return -1;
  2140. #endif
  2141. return reg_map[reg];
  2142. }
  2143. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
  2144. {
  2145. CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
  2146. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  2147. return reg;
  2148. #else
  2149. return freg_map[reg];
  2150. #endif
  2151. }
  2152. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
  2153. void *instruction, sljit_s32 size)
  2154. {
  2155. sljit_u8 *inst;
  2156. CHECK_ERROR();
  2157. CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
  2158. inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
  2159. FAIL_IF(!inst);
  2160. INC_SIZE(size);
  2161. SLJIT_MEMCPY(inst, instruction, size);
  2162. return SLJIT_SUCCESS;
  2163. }
  2164. /* --------------------------------------------------------------------- */
  2165. /* Floating point operators */
  2166. /* --------------------------------------------------------------------- */
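/* The buffer below holds 16-byte aligned sign and absolute-value masks used
   by SLJIT_NEG_F64/F32 (XORPD/XORPS with the sign bit) and SLJIT_ABS_F64/F32
   (ANDPD/ANDPS with the inverted sign bit). */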
/* Alignment (3 words) + 4 constants, 16 bytes each. */
  2168. static sljit_s32 sse2_data[3 + (4 * 4)];
  2169. static sljit_s32 *sse2_buffer;
  2170. static void init_compiler(void)
  2171. {
  2172. /* Align to 16 bytes. */
  2173. sse2_buffer = (sljit_s32*)(((sljit_uw)sse2_data + 15) & ~0xf);
/* Single precision constants (each constant is 16 bytes long). */
  2175. sse2_buffer[0] = 0x80000000;
  2176. sse2_buffer[4] = 0x7fffffff;
/* Double precision constants (each constant is 16 bytes long). */
  2178. sse2_buffer[8] = 0;
  2179. sse2_buffer[9] = 0x80000000;
  2180. sse2_buffer[12] = 0xffffffff;
  2181. sse2_buffer[13] = 0x7fffffff;
  2182. }
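/* emit_sse2 emits a 0F-prefixed scalar SSE2 instruction, selecting the F3
   (single precision) or F2 (double precision) prefix. emit_sse2_logic emits
   the packed/logic forms, optionally with the 66 prefix for packed double
   operands. */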
  2183. static sljit_s32 emit_sse2(struct sljit_compiler *compiler, sljit_u8 opcode,
  2184. sljit_s32 single, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w)
  2185. {
  2186. sljit_u8 *inst;
  2187. inst = emit_x86_instruction(compiler, 2 | (single ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, xmm1, 0, xmm2, xmm2w);
  2188. FAIL_IF(!inst);
  2189. *inst++ = GROUP_0F;
  2190. *inst = opcode;
  2191. return SLJIT_SUCCESS;
  2192. }
  2193. static sljit_s32 emit_sse2_logic(struct sljit_compiler *compiler, sljit_u8 opcode,
  2194. sljit_s32 pref66, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w)
  2195. {
  2196. sljit_u8 *inst;
  2197. inst = emit_x86_instruction(compiler, 2 | (pref66 ? EX86_PREF_66 : 0) | EX86_SSE2, xmm1, 0, xmm2, xmm2w);
  2198. FAIL_IF(!inst);
  2199. *inst++ = GROUP_0F;
  2200. *inst = opcode;
  2201. return SLJIT_SUCCESS;
  2202. }
  2203. static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
  2204. sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw)
  2205. {
  2206. return emit_sse2(compiler, MOVSD_x_xm, single, dst, src, srcw);
  2207. }
  2208. static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
  2209. sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src)
  2210. {
  2211. return emit_sse2(compiler, MOVSD_xm_x, single, src, dst, dstw);
  2212. }
  2213. static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
  2214. sljit_s32 dst, sljit_sw dstw,
  2215. sljit_s32 src, sljit_sw srcw)
  2216. {
  2217. sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  2218. sljit_u8 *inst;
  2219. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2220. if (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)
  2221. compiler->mode32 = 0;
  2222. #endif
  2223. inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
  2224. FAIL_IF(!inst);
  2225. *inst++ = GROUP_0F;
  2226. *inst = CVTTSD2SI_r_xm;
  2227. if (dst & SLJIT_MEM)
  2228. return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
  2229. return SLJIT_SUCCESS;
  2230. }
  2231. static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
  2232. sljit_s32 dst, sljit_sw dstw,
  2233. sljit_s32 src, sljit_sw srcw)
  2234. {
  2235. sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
  2236. sljit_u8 *inst;
  2237. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2238. if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)
  2239. compiler->mode32 = 0;
  2240. #endif
  2241. if (src & SLJIT_IMM) {
  2242. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2243. if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
  2244. srcw = (sljit_s32)srcw;
  2245. #endif
  2246. EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
  2247. src = TMP_REG1;
  2248. srcw = 0;
  2249. }
  2250. inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
  2251. FAIL_IF(!inst);
  2252. *inst++ = GROUP_0F;
  2253. *inst = CVTSI2SD_x_rm;
  2254. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2255. compiler->mode32 = 1;
  2256. #endif
  2257. if (dst_r == TMP_FREG)
  2258. return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
  2259. return SLJIT_SUCCESS;
  2260. }
  2261. static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
  2262. sljit_s32 src1, sljit_sw src1w,
  2263. sljit_s32 src2, sljit_sw src2w)
  2264. {
  2265. if (!FAST_IS_REG(src1)) {
  2266. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
  2267. src1 = TMP_FREG;
  2268. }
  2269. return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
  2270. }
  2271. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
  2272. sljit_s32 dst, sljit_sw dstw,
  2273. sljit_s32 src, sljit_sw srcw)
  2274. {
  2275. sljit_s32 dst_r;
  2276. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2277. compiler->mode32 = 1;
  2278. #endif
  2279. CHECK_ERROR();
  2280. SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
  2281. if (GET_OPCODE(op) == SLJIT_MOV_F64) {
  2282. if (FAST_IS_REG(dst))
  2283. return emit_sse2_load(compiler, op & SLJIT_F32_OP, dst, src, srcw);
  2284. if (FAST_IS_REG(src))
  2285. return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, src);
  2286. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src, srcw));
  2287. return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
  2288. }
  2289. if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) {
  2290. dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
  2291. if (FAST_IS_REG(src)) {
/* We overwrite the high bits of the source register. From the SLJIT
   point of view, this is not an issue.
   Note: In SSE3, we could also use MOVDDUP and MOVSLDUP. */
  2295. FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_F32_OP, src, src, 0));
  2296. }
  2297. else {
  2298. FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_F32_OP), TMP_FREG, src, srcw));
  2299. src = TMP_FREG;
  2300. }
  2301. FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_F32_OP, dst_r, src, 0));
  2302. if (dst_r == TMP_FREG)
  2303. return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
  2304. return SLJIT_SUCCESS;
  2305. }
  2306. if (FAST_IS_REG(dst)) {
  2307. dst_r = dst;
  2308. if (dst != src)
  2309. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
  2310. }
  2311. else {
  2312. dst_r = TMP_FREG;
  2313. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
  2314. }
  2315. switch (GET_OPCODE(op)) {
  2316. case SLJIT_NEG_F64:
  2317. FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer : sse2_buffer + 8)));
  2318. break;
  2319. case SLJIT_ABS_F64:
  2320. FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
  2321. break;
  2322. }
  2323. if (dst_r == TMP_FREG)
  2324. return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
  2325. return SLJIT_SUCCESS;
  2326. }
  2327. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
  2328. sljit_s32 dst, sljit_sw dstw,
  2329. sljit_s32 src1, sljit_sw src1w,
  2330. sljit_s32 src2, sljit_sw src2w)
  2331. {
  2332. sljit_s32 dst_r;
  2333. CHECK_ERROR();
  2334. CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
  2335. ADJUST_LOCAL_OFFSET(dst, dstw);
  2336. ADJUST_LOCAL_OFFSET(src1, src1w);
  2337. ADJUST_LOCAL_OFFSET(src2, src2w);
  2338. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2339. compiler->mode32 = 1;
  2340. #endif
  2341. if (FAST_IS_REG(dst)) {
  2342. dst_r = dst;
  2343. if (dst == src1)
  2344. ; /* Do nothing here. */
  2345. else if (dst == src2 && (op == SLJIT_ADD_F64 || op == SLJIT_MUL_F64)) {
  2346. /* Swap arguments. */
  2347. src2 = src1;
  2348. src2w = src1w;
  2349. }
  2350. else if (dst != src2)
  2351. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src1, src1w));
  2352. else {
  2353. dst_r = TMP_FREG;
  2354. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
  2355. }
  2356. }
  2357. else {
  2358. dst_r = TMP_FREG;
  2359. FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
  2360. }
  2361. switch (GET_OPCODE(op)) {
  2362. case SLJIT_ADD_F64:
  2363. FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
  2364. break;
  2365. case SLJIT_SUB_F64:
  2366. FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
  2367. break;
  2368. case SLJIT_MUL_F64:
  2369. FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
  2370. break;
  2371. case SLJIT_DIV_F64:
  2372. FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
  2373. break;
  2374. }
  2375. if (dst_r == TMP_FREG)
  2376. return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
  2377. return SLJIT_SUCCESS;
  2378. }
  2379. /* --------------------------------------------------------------------- */
  2380. /* Conditional instructions */
  2381. /* --------------------------------------------------------------------- */
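/* Labels, jumps, constants and put_labels are recorded in the instruction
   stream as two-byte markers: a zero length byte followed by a type byte
   (0, 1, 2 or 3). These markers carry no machine code; they are presumably
   resolved later when the final code is generated. */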
  2382. SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
  2383. {
  2384. sljit_u8 *inst;
  2385. struct sljit_label *label;
  2386. CHECK_ERROR_PTR();
  2387. CHECK_PTR(check_sljit_emit_label(compiler));
  2388. if (compiler->last_label && compiler->last_label->size == compiler->size)
  2389. return compiler->last_label;
  2390. label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
  2391. PTR_FAIL_IF(!label);
  2392. set_label(label, compiler);
  2393. inst = (sljit_u8*)ensure_buf(compiler, 2);
  2394. PTR_FAIL_IF(!inst);
  2395. *inst++ = 0;
  2396. *inst++ = 0;
  2397. return label;
  2398. }
  2399. SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
  2400. {
  2401. sljit_u8 *inst;
  2402. struct sljit_jump *jump;
  2403. CHECK_ERROR_PTR();
  2404. CHECK_PTR(check_sljit_emit_jump(compiler, type));
  2405. jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
  2406. PTR_FAIL_IF_NULL(jump);
  2407. set_jump(jump, compiler, (type & SLJIT_REWRITABLE_JUMP) | ((type & 0xff) << TYPE_SHIFT));
  2408. type &= 0xff;
  2409. /* Worst case size. */
  2410. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  2411. compiler->size += (type >= SLJIT_JUMP) ? 5 : 6;
  2412. #else
  2413. compiler->size += (type >= SLJIT_JUMP) ? (10 + 3) : (2 + 10 + 3);
  2414. #endif
  2415. inst = (sljit_u8*)ensure_buf(compiler, 2);
  2416. PTR_FAIL_IF_NULL(inst);
  2417. *inst++ = 0;
  2418. *inst++ = 1;
  2419. return jump;
  2420. }
  2421. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
  2422. {
  2423. sljit_u8 *inst;
  2424. struct sljit_jump *jump;
  2425. CHECK_ERROR();
  2426. CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
  2427. ADJUST_LOCAL_OFFSET(src, srcw);
  2428. CHECK_EXTRA_REGS(src, srcw, (void)0);
  2429. if (src == SLJIT_IMM) {
  2430. jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
  2431. FAIL_IF_NULL(jump);
  2432. set_jump(jump, compiler, JUMP_ADDR | (type << TYPE_SHIFT));
  2433. jump->u.target = srcw;
  2434. /* Worst case size. */
  2435. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  2436. compiler->size += 5;
  2437. #else
  2438. compiler->size += 10 + 3;
  2439. #endif
  2440. inst = (sljit_u8*)ensure_buf(compiler, 2);
  2441. FAIL_IF_NULL(inst);
  2442. *inst++ = 0;
  2443. *inst++ = 1;
  2444. }
  2445. else {
  2446. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2447. /* REX_W is not necessary (src is not immediate). */
  2448. compiler->mode32 = 1;
  2449. #endif
  2450. inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
  2451. FAIL_IF(!inst);
  2452. *inst++ = GROUP_FF;
  2453. *inst |= (type >= SLJIT_FAST_CALL) ? CALL_rm : JMP_rm;
  2454. }
  2455. return SLJIT_SUCCESS;
  2456. }
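/* Materializes a condition flag into a register or memory operand using
   SETcc (setcc opcode = jcc opcode + 0x10). On x86-64 a REX prefix makes
   every low byte addressable, so SETcc targets the destination (or TMP_REG1)
   directly and is followed by a MOVZX. On x86-32 only eax, ecx, edx and ebx
   have byte-addressable low registers, so other destinations go through eax
   (via XCHG) or use CMOV when it is available. */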
  2457. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
  2458. sljit_s32 dst, sljit_sw dstw,
  2459. sljit_s32 type)
  2460. {
  2461. sljit_u8 *inst;
  2462. sljit_u8 cond_set = 0;
  2463. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2464. sljit_s32 reg;
  2465. #endif
  2466. /* ADJUST_LOCAL_OFFSET and CHECK_EXTRA_REGS might overwrite these values. */
  2467. sljit_s32 dst_save = dst;
  2468. sljit_sw dstw_save = dstw;
  2469. CHECK_ERROR();
  2470. CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
  2471. ADJUST_LOCAL_OFFSET(dst, dstw);
  2472. CHECK_EXTRA_REGS(dst, dstw, (void)0);
  2473. type &= 0xff;
  2474. /* setcc = jcc + 0x10. */
  2475. cond_set = get_jump_code(type) + 0x10;
  2476. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2477. if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst)) {
  2478. inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + 3);
  2479. FAIL_IF(!inst);
  2480. INC_SIZE(4 + 3);
  2481. /* Set low register to conditional flag. */
  2482. *inst++ = (reg_map[TMP_REG1] <= 7) ? REX : REX_B;
  2483. *inst++ = GROUP_0F;
  2484. *inst++ = cond_set;
  2485. *inst++ = MOD_REG | reg_lmap[TMP_REG1];
  2486. *inst++ = REX | (reg_map[TMP_REG1] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B);
  2487. *inst++ = OR_rm8_r8;
  2488. *inst++ = MOD_REG | (reg_lmap[TMP_REG1] << 3) | reg_lmap[dst];
  2489. return SLJIT_SUCCESS;
  2490. }
  2491. reg = (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG1;
  2492. inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + 4);
  2493. FAIL_IF(!inst);
  2494. INC_SIZE(4 + 4);
  2495. /* Set low register to conditional flag. */
  2496. *inst++ = (reg_map[reg] <= 7) ? REX : REX_B;
  2497. *inst++ = GROUP_0F;
  2498. *inst++ = cond_set;
  2499. *inst++ = MOD_REG | reg_lmap[reg];
  2500. *inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : (REX_B | REX_R));
  2501. /* The movzx instruction does not affect flags. */
  2502. *inst++ = GROUP_0F;
  2503. *inst++ = MOVZX_r_rm8;
  2504. *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg];
  2505. if (reg != TMP_REG1)
  2506. return SLJIT_SUCCESS;
  2507. if (GET_OPCODE(op) < SLJIT_ADD) {
  2508. compiler->mode32 = GET_OPCODE(op) != SLJIT_MOV;
  2509. return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
  2510. }
  2511. #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
  2512. || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
  2513. compiler->skip_checks = 1;
  2514. #endif
  2515. return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
  2516. #else
  2517. /* The SLJIT_CONFIG_X86_32 code path starts here. */
  2518. if (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) {
  2519. if (reg_map[dst] <= 4) {
  2520. /* Low byte is accessible. */
  2521. inst = (sljit_u8*)ensure_buf(compiler, 1 + 3 + 3);
  2522. FAIL_IF(!inst);
  2523. INC_SIZE(3 + 3);
  2524. /* Set low byte to conditional flag. */
  2525. *inst++ = GROUP_0F;
  2526. *inst++ = cond_set;
  2527. *inst++ = MOD_REG | reg_map[dst];
  2528. *inst++ = GROUP_0F;
  2529. *inst++ = MOVZX_r_rm8;
  2530. *inst = MOD_REG | (reg_map[dst] << 3) | reg_map[dst];
  2531. return SLJIT_SUCCESS;
  2532. }
  2533. /* Low byte is not accessible. */
  2534. if (cpu_has_cmov == -1)
  2535. get_cpu_features();
  2536. if (cpu_has_cmov) {
  2537. EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);
/* An xor reg, reg operation would overwrite the flags. */
  2539. EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0);
  2540. inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
  2541. FAIL_IF(!inst);
  2542. INC_SIZE(3);
  2543. *inst++ = GROUP_0F;
  2544. /* cmovcc = setcc - 0x50. */
  2545. *inst++ = cond_set - 0x50;
  2546. *inst++ = MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REG1];
  2547. return SLJIT_SUCCESS;
  2548. }
  2549. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1);
  2550. FAIL_IF(!inst);
  2551. INC_SIZE(1 + 3 + 3 + 1);
  2552. *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
  2553. /* Set al to conditional flag. */
  2554. *inst++ = GROUP_0F;
  2555. *inst++ = cond_set;
  2556. *inst++ = MOD_REG | 0 /* eax */;
  2557. *inst++ = GROUP_0F;
  2558. *inst++ = MOVZX_r_rm8;
  2559. *inst++ = MOD_REG | (reg_map[dst] << 3) | 0 /* eax */;
  2560. *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
  2561. return SLJIT_SUCCESS;
  2562. }
  2563. if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && reg_map[dst] <= 4) {
  2564. SLJIT_ASSERT(reg_map[SLJIT_R0] == 0);
  2565. if (dst != SLJIT_R0) {
  2566. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 2 + 1);
  2567. FAIL_IF(!inst);
  2568. INC_SIZE(1 + 3 + 2 + 1);
  2569. /* Set low register to conditional flag. */
  2570. *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
  2571. *inst++ = GROUP_0F;
  2572. *inst++ = cond_set;
  2573. *inst++ = MOD_REG | 0 /* eax */;
  2574. *inst++ = OR_rm8_r8;
  2575. *inst++ = MOD_REG | (0 /* eax */ << 3) | reg_map[dst];
  2576. *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
  2577. }
  2578. else {
  2579. inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 3 + 2 + 2);
  2580. FAIL_IF(!inst);
  2581. INC_SIZE(2 + 3 + 2 + 2);
  2582. /* Set low register to conditional flag. */
  2583. *inst++ = XCHG_r_rm;
  2584. *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1];
  2585. *inst++ = GROUP_0F;
  2586. *inst++ = cond_set;
  2587. *inst++ = MOD_REG | 1 /* ecx */;
  2588. *inst++ = OR_rm8_r8;
  2589. *inst++ = MOD_REG | (1 /* ecx */ << 3) | 0 /* eax */;
  2590. *inst++ = XCHG_r_rm;
  2591. *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1];
  2592. }
  2593. return SLJIT_SUCCESS;
  2594. }
/* Set TMP_REG1 to the condition bit (0 or 1). */
  2596. inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1);
  2597. FAIL_IF(!inst);
  2598. INC_SIZE(1 + 3 + 3 + 1);
  2599. *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
  2600. /* Set al to conditional flag. */
  2601. *inst++ = GROUP_0F;
  2602. *inst++ = cond_set;
  2603. *inst++ = MOD_REG | 0 /* eax */;
  2604. *inst++ = GROUP_0F;
  2605. *inst++ = MOVZX_r_rm8;
  2606. *inst++ = MOD_REG | (0 << 3) /* eax */ | 0 /* eax */;
  2607. *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
  2608. if (GET_OPCODE(op) < SLJIT_ADD)
  2609. return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
  2610. #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
  2611. || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
  2612. compiler->skip_checks = 1;
  2613. #endif
  2614. return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
  2615. #endif /* SLJIT_CONFIG_X86_64 */
  2616. }
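/* Conditional move: a real CMOVcc is emitted when the CPU supports it (the
   CMOVcc opcode equals the corresponding Jcc opcode - 0x40); otherwise, and
   for destination registers that have no hardware mapping on x86-32, the
   operation falls back to sljit_emit_cmov_generic. */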
  2617. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
  2618. sljit_s32 dst_reg,
  2619. sljit_s32 src, sljit_sw srcw)
  2620. {
  2621. sljit_u8* inst;
  2622. CHECK_ERROR();
  2623. CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
  2624. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  2625. dst_reg &= ~SLJIT_I32_OP;
  2626. if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3))
  2627. return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
  2628. #else
  2629. if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV))
  2630. return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
  2631. #endif
  2632. /* ADJUST_LOCAL_OFFSET is not needed. */
  2633. CHECK_EXTRA_REGS(src, srcw, (void)0);
  2634. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2635. compiler->mode32 = dst_reg & SLJIT_I32_OP;
  2636. dst_reg &= ~SLJIT_I32_OP;
  2637. #endif
  2638. if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
  2639. EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
  2640. src = TMP_REG1;
  2641. srcw = 0;
  2642. }
  2643. inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw);
  2644. FAIL_IF(!inst);
  2645. *inst++ = GROUP_0F;
  2646. *inst = get_jump_code(type & 0xff) - 0x40;
  2647. return SLJIT_SUCCESS;
  2648. }
  2649. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
  2650. {
  2651. CHECK_ERROR();
  2652. CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));
  2653. ADJUST_LOCAL_OFFSET(dst, dstw);
  2654. CHECK_EXTRA_REGS(dst, dstw, (void)0);
  2655. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2656. compiler->mode32 = 0;
  2657. #endif
  2658. ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_SP), offset);
  2659. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2660. if (NOT_HALFWORD(offset)) {
  2661. FAIL_IF(emit_load_imm64(compiler, TMP_REG1, offset));
  2662. #if (defined SLJIT_DEBUG && SLJIT_DEBUG)
  2663. SLJIT_ASSERT(emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0) != SLJIT_ERR_UNSUPPORTED);
  2664. return compiler->error;
  2665. #else
  2666. return emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0);
  2667. #endif
  2668. }
  2669. #endif
  2670. if (offset != 0)
  2671. return emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset);
  2672. return emit_mov(compiler, dst, dstw, SLJIT_SP, 0);
  2673. }
  2674. SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
  2675. {
  2676. sljit_u8 *inst;
  2677. struct sljit_const *const_;
  2678. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2679. sljit_s32 reg;
  2680. #endif
  2681. CHECK_ERROR_PTR();
  2682. CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
  2683. ADJUST_LOCAL_OFFSET(dst, dstw);
  2684. CHECK_EXTRA_REGS(dst, dstw, (void)0);
  2685. const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
  2686. PTR_FAIL_IF(!const_);
  2687. set_const(const_, compiler);
  2688. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2689. compiler->mode32 = 0;
  2690. reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
  2691. if (emit_load_imm64(compiler, reg, init_value))
  2692. return NULL;
  2693. #else
  2694. if (emit_mov(compiler, dst, dstw, SLJIT_IMM, init_value))
  2695. return NULL;
  2696. #endif
  2697. inst = (sljit_u8*)ensure_buf(compiler, 2);
  2698. PTR_FAIL_IF(!inst);
  2699. *inst++ = 0;
  2700. *inst++ = 2;
  2701. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2702. if (dst & SLJIT_MEM)
  2703. if (emit_mov(compiler, dst, dstw, TMP_REG1, 0))
  2704. return NULL;
  2705. #endif
  2706. return const_;
  2707. }
  2708. SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  2709. {
  2710. struct sljit_put_label *put_label;
  2711. sljit_u8 *inst;
  2712. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2713. sljit_s32 reg;
  2714. sljit_uw start_size;
  2715. #endif
  2716. CHECK_ERROR_PTR();
  2717. CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
  2718. ADJUST_LOCAL_OFFSET(dst, dstw);
  2719. CHECK_EXTRA_REGS(dst, dstw, (void)0);
  2720. put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
  2721. PTR_FAIL_IF(!put_label);
  2722. set_put_label(put_label, compiler, 0);
  2723. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2724. compiler->mode32 = 0;
  2725. reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
  2726. if (emit_load_imm64(compiler, reg, 0))
  2727. return NULL;
  2728. #else
  2729. if (emit_mov(compiler, dst, dstw, SLJIT_IMM, 0))
  2730. return NULL;
  2731. #endif
  2732. #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
  2733. if (dst & SLJIT_MEM) {
  2734. start_size = compiler->size;
  2735. if (emit_mov(compiler, dst, dstw, TMP_REG1, 0))
  2736. return NULL;
  2737. put_label->flags = compiler->size - start_size;
  2738. }
  2739. #endif
  2740. inst = (sljit_u8*)ensure_buf(compiler, 2);
  2741. PTR_FAIL_IF(!inst);
  2742. *inst++ = 0;
  2743. *inst++ = 3;
  2744. return put_label;
  2745. }
  2746. SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
  2747. {
  2748. SLJIT_UNUSED_ARG(executable_offset);
  2749. SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 0);
  2750. #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
  2751. sljit_unaligned_store_sw((void*)addr, new_target - (addr + 4) - (sljit_uw)executable_offset);
  2752. #else
  2753. sljit_unaligned_store_sw((void*)addr, (sljit_sw) new_target);
  2754. #endif
  2755. SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 1);
  2756. }
  2757. SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
  2758. {
  2759. SLJIT_UNUSED_ARG(executable_offset);
  2760. SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_sw)), 0);
  2761. sljit_unaligned_store_sw((void*)addr, new_constant);
  2762. SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_sw)), 1);
  2763. }