zend_hash.c 72 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894
  1. /*
  2. +----------------------------------------------------------------------+
  3. | Zend Engine |
  4. +----------------------------------------------------------------------+
  5. | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) |
  6. +----------------------------------------------------------------------+
  7. | This source file is subject to version 2.00 of the Zend license, |
  8. | that is bundled with this package in the file LICENSE, and is |
  9. | available through the world-wide-web at the following url: |
  10. | http://www.zend.com/license/2_00.txt. |
  11. | If you did not receive a copy of the Zend license and are unable to |
  12. | obtain it through the world-wide-web, please send a note to |
  13. | license@zend.com so we can mail you a copy immediately. |
  14. +----------------------------------------------------------------------+
  15. | Authors: Andi Gutmans <andi@php.net> |
  16. | Zeev Suraski <zeev@php.net> |
  17. | Dmitry Stogov <dmitry@php.net> |
  18. +----------------------------------------------------------------------+
  19. */
  20. #include "zend.h"
  21. #include "zend_globals.h"
  22. #include "zend_variables.h"
  23. #if defined(__aarch64__)
  24. # include <arm_neon.h>
  25. #endif
  26. #ifdef __SSE2__
  27. # include <mmintrin.h>
  28. # include <emmintrin.h>
  29. #endif
  30. #if ZEND_DEBUG
  31. # define HT_ASSERT(ht, expr) \
  32. ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
  33. #else
  34. # define HT_ASSERT(ht, expr)
  35. #endif
  36. #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
  37. #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
  38. #if ZEND_DEBUG
  39. #define HT_OK 0x00
  40. #define HT_IS_DESTROYING 0x01
  41. #define HT_DESTROYED 0x02
  42. #define HT_CLEANING 0x03
/* Debug-build diagnostic: report why the given hash table is in an
 * inconsistent state and abort.  Invoked through the IS_CONSISTENT()
 * macro; returns silently when the consistency flag bits equal HT_OK. */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
		return;
	}
	/* Decode the specific inconsistency recorded in the flag bits. */
	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
		case HT_IS_DESTROYING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
			break;
		case HT_DESTROYED:
			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
			break;
		case HT_CLEANING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
			break;
		default:
			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
			break;
	}
	/* Any inconsistency is fatal in debug builds. */
	ZEND_UNREACHABLE();
}
  64. #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
  65. #define SET_INCONSISTENT(n) do { \
  66. HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
  67. } while (0)
  68. #else
  69. #define IS_CONSISTENT(a)
  70. #define SET_INCONSISTENT(n)
  71. #endif
  72. #define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
  73. if ((ht)->nNumUsed >= (ht)->nTableSize) { \
  74. zend_hash_do_resize(ht); \
  75. }
/* Case-insensitive variant of zend_hash_str_find_ptr(): looks up the
 * lowercased copy of str[0..len).  Returns the stored pointer, or NULL
 * if the key is absent. */
ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
	void *result;
	char *lc_str;

	/* Stack allocate small strings to improve performance */
	ALLOCA_FLAG(use_heap)

	lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
	result = zend_hash_str_find_ptr(ht, lc_str, len);
	free_alloca(lc_str, use_heap);

	return result;
}
  86. ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
  87. void *result;
  88. zend_string *lc_key = zend_string_tolower(key);
  89. result = zend_hash_find_ptr(ht, lc_key);
  90. zend_string_release(lc_key);
  91. return result;
  92. }
  93. static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
/* Round the requested element count up to a power of two within
 * [HT_MIN_SIZE, HT_MAX_SIZE).  Requests >= HT_MAX_SIZE abort with a fatal
 * error because the allocation size computation would overflow. */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#ifdef ZEND_WIN32
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#ifdef ZEND_WIN32
	if (BitScanReverse(&index, nSize - 1)) {
		/* 2 << (bit index of highest set bit) == next power of two >= nSize. */
		return 0x2u << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bit scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
	/* clz(x) ^ 0x1f == 31 - clz(x) for 32-bit x, so this is
	 * 2 << (31 - clz(nSize - 1)): the next power of two >= nSize. */
	return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: smear the highest set bit into all lower bits,
	 * then add one to reach the next power of two. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
/* Allocate bucket storage for a packed (vector-like) table of
 * ht->nTableSize slots and mark it HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS.
 * Assumes the table is currently uninitialized (lazy allocation). */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
	void *data;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		/* Persistent arrays survive the request; use the persistent allocator. */
		data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
	} else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
		/* Use specialized API with constant allocation amount for a particularly common case. */
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
	} else {
		data = emalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
	}
	HT_SET_DATA_ADDR(ht, data);
	/* Don't overwrite iterator count. */
	ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET_PACKED(ht);
}
/* Allocate bucket storage for a mixed (hashed) table of ht->nTableSize
 * slots and reset every hash slot to HT_INVALID_IDX (-1).  The common
 * HT_MIN_SIZE case is fully specialized: its 16 hash slots are filled
 * with -1 via SSE2/NEON stores where available, scalar stores otherwise. */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
	void *data;
	uint32_t nSize = ht->nTableSize;

	if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
		data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
	} else if (EXPECTED(nSize == HT_MIN_SIZE)) {
		data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
		ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
		HT_SET_DATA_ADDR(ht, data);
		/* Don't overwrite iterator count. */
		ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
#ifdef __SSE2__
		/* Fill all 16 hash slots with -1 using four 128-bit stores. */
		do {
			__m128i xmm0 = _mm_setzero_si128();
			xmm0 = _mm_cmpeq_epi8(xmm0, xmm0); /* all-ones pattern (-1) */
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 0), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 4), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 8), xmm0);
			_mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
		} while (0);
#elif defined(__aarch64__)
		/* Same fill via NEON 128-bit stores. */
		do {
			int32x4_t t = vdupq_n_s32(-1);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 0), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 4), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 8), t);
			vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
		} while (0);
#else
		/* Scalar fallback: initialize the 16 hash slots one by one. */
		HT_HASH_EX(data, 0) = -1;
		HT_HASH_EX(data, 1) = -1;
		HT_HASH_EX(data, 2) = -1;
		HT_HASH_EX(data, 3) = -1;
		HT_HASH_EX(data, 4) = -1;
		HT_HASH_EX(data, 5) = -1;
		HT_HASH_EX(data, 6) = -1;
		HT_HASH_EX(data, 7) = -1;
		HT_HASH_EX(data, 8) = -1;
		HT_HASH_EX(data, 9) = -1;
		HT_HASH_EX(data, 10) = -1;
		HT_HASH_EX(data, 11) = -1;
		HT_HASH_EX(data, 12) = -1;
		HT_HASH_EX(data, 13) = -1;
		HT_HASH_EX(data, 14) = -1;
		HT_HASH_EX(data, 15) = -1;
#endif
		return;
	} else {
		data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
	}
	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
	HT_SET_DATA_ADDR(ht, data);
	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
	HT_HASH_RESET(ht);
}
  198. static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, bool packed)
  199. {
  200. HT_ASSERT_RC1(ht);
  201. ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
  202. if (packed) {
  203. zend_hash_real_init_packed_ex(ht);
  204. } else {
  205. zend_hash_real_init_mixed_ex(ht);
  206. }
  207. }
/* Shared hash-slot sentinel for tables that have not allocated real
 * storage yet: every lookup through it yields HT_INVALID_IDX. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};

/* The immutable, shared empty array.  refcount starts at 2 and the
 * GC_IMMUTABLE flag prevents modification; arData points just past the
 * sentinel slots above, matching the HT_MIN_MASK layout. */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_UNINITIALIZED,
	.nTableMask = HT_MIN_MASK,
	.arData = (Bucket*)&uninitialized_bucket[2],
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = 0,
	.pDestructor = ZVAL_PTR_DTOR
};
/* Common table initializer.  No bucket storage is allocated here: arData
 * is pointed at the shared uninitialized_bucket sentinel and the
 * HASH_FLAG_UNINITIALIZED flag defers real allocation to the first insert. */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = GC_ARRAY | (persistent ? ((GC_PERSISTENT|GC_NOT_COLLECTABLE) << GC_FLAGS_SHIFT) : 0);
	HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	/* NOTE(review): ZEND_LONG_MIN appears to mark "no numeric index assigned
	 * yet" — presumably resolved on the first append; confirm against the
	 * insertion paths. */
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->pDestructor = pDestructor;
	ht->nTableSize = zend_hash_check_size(nSize);
}
/* Public entry point for hash table initialization; see _zend_hash_init_int(). */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
/* Allocate and initialize a minimal non-persistent array (size HT_MIN_SIZE,
 * zval destructor).  Bucket storage remains lazily allocated. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
	return ht;
}
/* Allocate and initialize a non-persistent array sized for nSize elements
 * (rounded up by _zend_hash_init_int via zend_hash_check_size). */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
	return ht;
}
  253. ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
  254. {
  255. Bucket *p;
  256. HashTable *ht = emalloc(sizeof(HashTable));
  257. _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
  258. ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
  259. zend_hash_real_init_packed_ex(ht);
  260. p = ht->arData;
  261. ZVAL_COPY_VALUE(&p->val, val1);
  262. p->h = 0;
  263. p->key = NULL;
  264. p++;
  265. ZVAL_COPY_VALUE(&p->val, val2);
  266. p->h = 1;
  267. p->key = NULL;
  268. return ht;
  269. }
/* Double the capacity of a packed table, reallocating bucket storage in
 * place.  Aborts with a fatal error if doubling would exceed HT_MAX_SIZE. */
ZEND_API void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
	HT_ASSERT_RC1(ht);
	if (ht->nTableSize >= HT_MAX_SIZE) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
	}
	ht->nTableSize += ht->nTableSize;
	/* Only HT_USED_SIZE bytes of the old block carry live data. */
	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
}
/* Public wrapper: allocate real storage for an uninitialized table,
 * packed or mixed depending on the flag. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, bool packed)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
/* Public wrapper: allocate real storage for an uninitialized packed table. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
/* Public wrapper: allocate real storage for an uninitialized mixed table. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
/* Convert a packed table to a mixed (hashed) one: allocate the larger
 * layout (hash slots + buckets), copy the live buckets over, free the old
 * block, then rebuild the hash slots via zend_hash_rehash(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *old_buckets = ht->arData;
	uint32_t nSize = ht->nTableSize;

	HT_ASSERT_RC1(ht);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	/* Recompute the collision chains for the new mask. */
	zend_hash_rehash(ht);
}
/* Convert a mixed table to the packed layout: allocate the smaller
 * HT_MIN_MASK layout, copy the buckets, and free the old block.  No rehash
 * is needed — packed tables index buckets positionally. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *old_buckets = ht->arData;

	HT_ASSERT_RC1(ht);
	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, new_data);
	HT_HASH_RESET_PACKED(ht);
	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
/* Pre-size a table to hold at least nSize elements without further
 * growth.  Handles three cases: uninitialized tables (set size, then do
 * the first real allocation), packed tables (simple realloc), and mixed
 * tables (reallocate and rehash).  A nSize of 0 is a no-op. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, bool packed)
{
	HT_ASSERT_RC1(ht);
	if (nSize == 0) return;
	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			/* Caller must not request a packed extension of a mixed table. */
			ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
			if (nSize > ht->nTableSize) {
				ht->nTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			}
		} else {
			ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
			if (nSize > ht->nTableSize) {
				/* Mixed tables cannot be realloc'd in place: the hash-slot
				 * area precedes the buckets, so allocate fresh storage,
				 * copy the buckets, and rebuild the chains. */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				ht->nTableSize = nSize;
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
/* Truncate the table back to its first nNumUsed buckets, unlinking each
 * discarded (non-tombstone) bucket from its collision chain.  Walks the
 * discarded region backwards, which is valid because collision pointers
 * always run from higher to lower bucket indexes. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	while (p != end) {
		p--;
		/* Tombstones were never counted or chained; skip them. */
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
#if 0
		if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
			abort();
		}
#endif
		/* p must be the chain head, so its successor becomes the new head. */
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
/* Count the table's elements, excluding INDIRECT entries whose target zval
 * is IS_UNDEF (unset symbol-table variables).  Starts from the cached
 * nNumOfElements and subtracts the empty indirections. */
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
	zval *val;
	uint32_t num = ht->nNumOfElements;

	ZEND_HASH_FOREACH_VAL(ht, val) {
		if (Z_TYPE_P(val) == IS_INDIRECT) {
			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
				num--;
			}
		}
	} ZEND_HASH_FOREACH_END();
	return num;
}
  392. /* }}} */
/* Return the observable element count of an array.  Tables flagged with
 * empty indirections (and the global symbol table, which may contain them
 * without the flag) need a recount; the flag is cleared once the recount
 * matches the cached total. */
ZEND_API uint32_t zend_array_count(HashTable *ht)
{
	uint32_t num;
	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
		num = zend_array_recalc_elements(ht);
		if (UNEXPECTED(ht->nNumOfElements == num)) {
			/* No empty indirections remain; drop the flag. */
			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
		}
	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
		num = zend_array_recalc_elements(ht);
	} else {
		num = zend_hash_num_elements(ht);
	}
	return num;
}
  408. /* }}} */
  409. static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
  410. {
  411. while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
  412. pos++;
  413. }
  414. return pos;
  415. }
/* Internal-pointer accessor that skips tombstones. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}
/* Public wrapper over _zend_hash_get_current_pos(). */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}
/* Register an external iterator over ht at position pos and return its
 * slot index in EG(ht_iterators).  Reuses a free slot when one exists;
 * otherwise grows the iterator array by 8 slots (migrating off the fixed
 * EG(ht_iterators_slots) buffer on first growth). */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* Once the per-table counter has overflowed it is no longer tracked. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	/* First pass: reuse a free slot (ht == NULL). */
	while (iter != end) {
		if (iter->ht == NULL) {
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* No free slot: grow by 8.  The initial fixed-size buffer cannot be
	 * realloc'd, so copy out of it on first growth. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	/* Zero the remaining 7 new slots so they read as free. */
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
	idx = iter - EG(ht_iterators);
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
/* Return the position of iterator idx over ht.  If the iterator was
 * tracking a different table (e.g. after reallocation/poisoning), move its
 * reference count from the old table to ht and resynchronize the position
 * with ht's internal pointer. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht)) {
		/* Drop the count on the previously tracked table, unless it is
		 * free, poisoned, or its counter already overflowed. */
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
  476. ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
  477. {
  478. HashTable *ht = Z_ARRVAL_P(array);
  479. HashTableIterator *iter = EG(ht_iterators) + idx;
  480. ZEND_ASSERT(idx != (uint32_t)-1);
  481. if (UNEXPECTED(iter->ht != ht)) {
  482. if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
  483. && EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
  484. HT_DEC_ITERATORS_COUNT(iter->ht);
  485. }
  486. SEPARATE_ARRAY(array);
  487. ht = Z_ARRVAL_P(array);
  488. if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
  489. HT_INC_ITERATORS_COUNT(ht);
  490. }
  491. iter->ht = ht;
  492. iter->pos = _zend_hash_get_current_pos(ht);
  493. }
  494. return iter->pos;
  495. }
/* Unregister iterator idx: decrement the tracked table's iterator count
 * (when valid and not overflowed), free the slot, and shrink
 * EG(ht_iterators_used) past any trailing free slots. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;

	/* If this was the highest used slot, trim the used watermark. */
	if (idx == EG(ht_iterators_used) - 1) {
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
  513. static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
  514. {
  515. HashTableIterator *iter = EG(ht_iterators);
  516. HashTableIterator *end = iter + EG(ht_iterators_used);
  517. while (iter != end) {
  518. if (iter->ht == ht) {
  519. iter->ht = HT_POISONED_PTR;
  520. }
  521. iter++;
  522. }
  523. }
  524. static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
  525. {
  526. if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
  527. _zend_hash_iterators_remove(ht);
  528. }
  529. }
  530. ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
  531. {
  532. HashTableIterator *iter = EG(ht_iterators);
  533. HashTableIterator *end = iter + EG(ht_iterators_used);
  534. HashPosition res = ht->nNumUsed;
  535. while (iter != end) {
  536. if (iter->ht == ht) {
  537. if (iter->pos >= start && iter->pos < res) {
  538. res = iter->pos;
  539. }
  540. }
  541. iter++;
  542. }
  543. return res;
  544. }
  545. ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
  546. {
  547. HashTableIterator *iter = EG(ht_iterators);
  548. HashTableIterator *end = iter + EG(ht_iterators_used);
  549. while (iter != end) {
  550. if (iter->ht == ht && iter->pos == from) {
  551. iter->pos = to;
  552. }
  553. iter++;
  554. }
  555. }
  556. ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
  557. {
  558. HashTableIterator *iter = EG(ht_iterators);
  559. HashTableIterator *end = iter + EG(ht_iterators_used);
  560. while (iter != end) {
  561. if (iter->ht == ht) {
  562. iter->pos += step;
  563. }
  564. iter++;
  565. }
  566. }
/* Look up the bucket for string key `key`.  When known_hash is true the
 * caller guarantees ZSTR_H(key) is already computed; otherwise the hash
 * is computed (and cached) here.  Returns NULL if the key is absent. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, bool known_hash)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	if (known_hash) {
		h = ZSTR_H(key);
		ZEND_ASSERT(h != 0 && "Hash must be known");
	} else {
		h = zend_string_hash_val(key);
	}
	arData = ht->arData;
	/* map the hash to a hash-slot index via the table mask */
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}
	/* Walk the collision chain.  Each iteration first does the full
	 * (hash + content) comparison on the current bucket, then the cheap
	 * pointer-identity check on the next one. */
	while (1) {
		if (p->h == ZSTR_H(key) &&
			EXPECTED(p->key) &&
			zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
  605. static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
  606. {
  607. uint32_t nIndex;
  608. uint32_t idx;
  609. Bucket *p, *arData;
  610. arData = ht->arData;
  611. nIndex = h | ht->nTableMask;
  612. idx = HT_HASH_EX(arData, nIndex);
  613. while (idx != HT_INVALID_IDX) {
  614. ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
  615. p = HT_HASH_TO_BUCKET_EX(arData, idx);
  616. if ((p->h == h)
  617. && p->key
  618. && (ZSTR_LEN(p->key) == len)
  619. && !memcmp(ZSTR_VAL(p->key), str, len)) {
  620. return p;
  621. }
  622. idx = Z_NEXT(p->val);
  623. }
  624. return NULL;
  625. }
  626. static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
  627. {
  628. uint32_t nIndex;
  629. uint32_t idx;
  630. Bucket *p, *arData;
  631. arData = ht->arData;
  632. nIndex = h | ht->nTableMask;
  633. idx = HT_HASH_EX(arData, nIndex);
  634. while (idx != HT_INVALID_IDX) {
  635. ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
  636. p = HT_HASH_TO_BUCKET_EX(arData, idx);
  637. if (p->h == h && !p->key) {
  638. return p;
  639. }
  640. idx = Z_NEXT(p->val);
  641. }
  642. return NULL;
  643. }
/* Core insert/update for string keys.  Behaviour is selected by `flag`:
 *   HASH_ADD             - fail (return NULL) if the key already exists
 *   HASH_UPDATE          - overwrite an existing value
 *   HASH_UPDATE_INDIRECT - follow IS_INDIRECT slots before updating
 *   HASH_ADD_NEW         - caller guarantees the key does not exist
 *   HASH_LOOKUP          - return the existing slot, or insert NULL
 * Returns the slot holding the value, or NULL when an ADD collides. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* interned strings already carry their hash; others get it here */
	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_hash_val(key);
	}

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* empty table: allocate and insert without a lookup */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* string key forces a packed table into hash mode */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
		/* in debug builds the lookup also runs for ADD_NEW, to assert
		 * the caller's "key is new" guarantee below */
		p = zend_hash_find_bucket(ht, key, 1);
		if (p) {
			zval *data;
			ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only write through an
				 * IS_INDIRECT slot whose target is still UNDEF */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}
	/* append the bucket and push it onto the head of its chain */
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	return &p->val;
}
/* Core insert/update for (char*, len) keys with precomputed hash h.
 * Same flag semantics as _zend_hash_add_or_update_i(); on insertion a
 * new zend_string key is allocated (persistent iff the table is). */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
		if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			/* empty table: allocate and insert without a lookup */
			zend_hash_real_init_mixed(ht);
			goto add_to_hash;
		} else {
			/* string key forces a packed table into hash mode */
			zend_hash_packed_to_hash(ht);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);
		if (p) {
			zval *data;
			if (flag & HASH_LOOKUP) {
				return &p->val;
			} else if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only write through an
				 * IS_INDIRECT slot whose target is still UNDEF */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	/* materialize the key; the caller-supplied hash is cached in it */
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	p->h = ZSTR_H(key) = h;
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	/* push the new bucket onto the head of its collision chain */
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	return &p->val;
}
  786. ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
  787. {
  788. if (flag == HASH_ADD) {
  789. return zend_hash_add(ht, key, pData);
  790. } else if (flag == HASH_ADD_NEW) {
  791. return zend_hash_add_new(ht, key, pData);
  792. } else if (flag == HASH_UPDATE) {
  793. return zend_hash_update(ht, key, pData);
  794. } else {
  795. ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
  796. return zend_hash_update_ind(ht, key, pData);
  797. }
  798. }
/* Insert key => pData; returns NULL if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}
/* Insert key => pData, overwriting an existing value if present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}
/* Like zend_hash_update(), but writes through IS_INDIRECT slots. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
/* Insert key => pData; the caller guarantees the key is not present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
/* Return the slot for key, inserting a NULL value first if absent. */
ZEND_API zval* ZEND_FASTCALL zend_hash_lookup(HashTable *ht, zend_string *key)
{
	return _zend_hash_add_or_update_i(ht, key, NULL, HASH_LOOKUP);
}
  819. ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
  820. {
  821. if (flag == HASH_ADD) {
  822. return zend_hash_str_add(ht, str, len, pData);
  823. } else if (flag == HASH_ADD_NEW) {
  824. return zend_hash_str_add_new(ht, str, len, pData);
  825. } else if (flag == HASH_UPDATE) {
  826. return zend_hash_str_update(ht, str, len, pData);
  827. } else {
  828. ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
  829. return zend_hash_str_update_ind(ht, str, len, pData);
  830. }
  831. }
  832. ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
  833. {
  834. zend_ulong h = zend_hash_func(str, len);
  835. return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
  836. }
  837. ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
  838. {
  839. zend_ulong h = zend_hash_func(str, len);
  840. return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
  841. }
  842. ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
  843. {
  844. zend_ulong h = zend_hash_func(str, len);
  845. return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
  846. }
  847. ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
  848. {
  849. zend_ulong h = zend_hash_func(str, len);
  850. return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
  851. }
  852. ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
  853. {
  854. zval dummy;
  855. ZVAL_NULL(&dummy);
  856. return zend_hash_index_add(ht, h, &dummy);
  857. }
  858. ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
  859. {
  860. zval dummy;
  861. ZVAL_NULL(&dummy);
  862. return zend_hash_add(ht, key, &dummy);
  863. }
  864. ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
  865. {
  866. zval dummy;
  867. ZVAL_NULL(&dummy);
  868. return zend_hash_str_add(ht, str, len, &dummy);
  869. }
/* Core insert/update for integer keys.  Flag semantics match
 * _zend_hash_add_or_update_i(), plus HASH_ADD_NEXT for appending at
 * nNextFreeElement.  Packed tables are kept packed when the key keeps
 * the array dense enough; otherwise they are converted to hash mode. */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* nNextFreeElement == ZEND_LONG_MIN means "no numeric key yet";
	 * an append then starts at 0 */
	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
		h = 0;
	}

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)
		 && h < ht->nNumUsed) {
			/* key falls inside the already-used packed range */
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
				if (flag & HASH_LOOKUP) {
					return &p->val;
				}
replace:
				if (flag & HASH_ADD) {
					return NULL;
				}
				if (ht->pDestructor) {
					ht->pDestructor(&p->val);
				}
				ZVAL_COPY_VALUE(&p->val, pData);
				return &p->val;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			p = ht->arData + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				if (h > ht->nNumUsed) {
					Bucket *q = ht->arData + ht->nNumUsed;
					while (q != p) {
						ZVAL_UNDEF(&q->val);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			goto add;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* key is just past the end and the array is dense enough:
			 * grow and stay packed */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			/* too sparse to stay packed */
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		/* first insertion decides packed vs. hash representation */
		if (h < ht->nTableSize) {
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		/* in debug builds the lookup also runs for ADD_NEW, to assert
		 * the caller's "key is new" guarantee below */
		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				if (flag & HASH_LOOKUP) {
					return &p->val;
				}
				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
	}

	/* hash-mode insertion: append the bucket and chain it in */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	if ((zend_long)h >= ht->nNextFreeElement) {
		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
add:
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL;
	if (flag & HASH_LOOKUP) {
		ZVAL_NULL(&p->val);
	} else {
		ZVAL_COPY_VALUE(&p->val, pData);
	}
	return &p->val;
}
  964. ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
  965. {
  966. if (flag == HASH_ADD) {
  967. return zend_hash_index_add(ht, h, pData);
  968. } else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
  969. return zend_hash_index_add_new(ht, h, pData);
  970. } else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
  971. ZEND_ASSERT(h == ht->nNextFreeElement);
  972. return zend_hash_next_index_insert(ht, pData);
  973. } else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
  974. ZEND_ASSERT(h == ht->nNextFreeElement);
  975. return zend_hash_next_index_insert_new(ht, pData);
  976. } else {
  977. ZEND_ASSERT(flag == HASH_UPDATE);
  978. return zend_hash_index_update(ht, h, pData);
  979. }
  980. }
/* Insert h => pData; returns NULL if the integer key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
/* Insert h => pData; the caller guarantees the key is not present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
/* Insert h => pData, overwriting an existing value if present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
/* Append pData under the next free integer key; NULL on collision. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
/* Append pData under the next free integer key; caller guarantees no
 * collision is possible. */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
/* Return the slot for integer key h, inserting NULL first if absent. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_lookup(HashTable *ht, zend_ulong h)
{
	return _zend_hash_index_add_or_update_i(ht, h, NULL, HASH_LOOKUP);
}
/* Re-key existing bucket b of a non-packed table to string key `key`,
 * keeping the bucket (and thus element order) in place.  Returns b's
 * value slot on success; returns NULL if `key` already belongs to a
 * different bucket (if it already belongs to b, b's slot is returned). */
ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx, i;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));

	p = zend_hash_find_bucket(ht, key, 0);
	if (UNEXPECTED(p)) {
		return (p == b) ? &p->val : NULL;
	}

	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}

	arData = ht->arData;

	/* del from hash */
	idx = HT_IDX_TO_HASH(b - arData);
	nIndex = b->h | ht->nTableMask;
	i = HT_HASH_EX(arData, nIndex);
	if (i == idx) {
		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
	} else {
		/* walk the old chain to find b's predecessor and unlink b */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(p->val) = Z_NEXT(b->val);
	}
	zend_string_release(b->key);

	/* add to hash */
	idx = b - arData;
	b->key = key;
	b->h = ZSTR_H(key);
	nIndex = b->h | ht->nTableMask;
	idx = HT_IDX_TO_HASH(idx);
	i = HT_HASH_EX(arData, nIndex);
	if (i == HT_INVALID_IDX || i < idx) {
		Z_NEXT(b->val) = i;
		HT_HASH_EX(arData, nIndex) = idx;
	} else {
		/* insert b so the chain stays ordered by descending bucket index */
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(b->val) = Z_NEXT(p->val);
		Z_NEXT(p->val) = idx;
	}
	return &b->val;
}
/* Make room for more elements: if more than ~1/32 of the used slots are
 * holes, compact in place via rehash; otherwise double the allocation
 * (fatal error once HT_MAX_SIZE is reached). */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ht->nTableSize = nSize;
		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* only the bucket array is copied; the hash slots are rebuilt
		 * from scratch by the rehash below */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
/* Rebuild all hash slots / collision chains from the bucket array.
 * If the array contains holes (IS_UNDEF values), the buckets are also
 * compacted in place, and the internal pointer plus any registered
 * iterators are remapped to the new positions. */
ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
		}
		return;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* fast path: no compaction needed, just rebuild the chains */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* first hole found at i: from here on, shift live
				 * buckets (read cursor i/p) down to j/q */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* same loop, but also remap iterator positions as
					 * their buckets move from i to j */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
}
/* Unlink bucket p (located at hashed index `idx`) from ht and destroy
 * its key and value.  `prev` is p's predecessor in its collision chain,
 * or NULL when p is the chain head (or the table is packed). */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		/* unlink from the collision chain (packed tables have none) */
		if (prev) {
			Z_NEXT(prev->val) = Z_NEXT(p->val);
		} else {
			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
		}
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		/* advance internal pointer / iterators to the next live slot */
		uint32_t new_idx;

		new_idx = idx;
		while (1) {
			new_idx++;
			if (new_idx >= ht->nNumUsed) {
				break;
			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
				break;
			}
		}
		if (ht->nInternalPointer == idx) {
			ht->nInternalPointer = new_idx;
		}
		zend_hash_iterators_update(ht, idx, new_idx);
	}
	if (ht->nNumUsed - 1 == idx) {
		/* deleted the last used slot: also trim any trailing holes */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
	}
	if (p->key) {
		zend_string_release(p->key);
	}
	if (ht->pDestructor) {
		/* clear the slot before the destructor runs; the value is
		 * destroyed via a local copy */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
/* Delete bucket p at hashed index `idx`, locating its collision-chain
 * predecessor first (needed by _zend_hash_del_el_ex for unlinking). */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;

	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		uint32_t nIndex = p->h | ht->nTableMask;
		uint32_t i = HT_HASH(ht, nIndex);

		if (i != idx) {
			/* p is not the chain head: walk the chain up to p */
			prev = HT_HASH_TO_BUCKET(ht, i);
			while (Z_NEXT(prev->val) != idx) {
				i = Z_NEXT(prev->val);
				prev = HT_HASH_TO_BUCKET(ht, i);
			}
		}
	}
	_zend_hash_del_el_ex(ht, idx, p, prev);
}
/* Delete a bucket the caller already located (p must be in ht->arData). */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
/* Delete the element with string key `key`; SUCCESS if found and
 * removed, FAILURE otherwise. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) || /* pointer check for the same interned string */
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Delete by string key, treating IS_INDIRECT slots specially: the
 * indirect target is reset to UNDEF (and the table flagged as having
 * empty indirect slots) instead of removing the bucket itself. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) || /* pointer check for the same interned string */
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					/* indirect target already emptied: nothing to do */
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* clear the slot before the destructor runs */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Delete by (str, len) key with the same IS_INDIRECT handling as
 * zend_hash_del_ind(): indirect targets are reset to UNDEF in place. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h)
			 && p->key
			 && (ZSTR_LEN(p->key) == len)
			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					/* indirect target already emptied: nothing to do */
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						ht->pDestructor(data);
					}
					ZVAL_UNDEF(data);
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
  1339. ZEND_API zend_result ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
  1340. {
  1341. zend_ulong h;
  1342. uint32_t nIndex;
  1343. uint32_t idx;
  1344. Bucket *p;
  1345. Bucket *prev = NULL;
  1346. IS_CONSISTENT(ht);
  1347. HT_ASSERT_RC1(ht);
  1348. h = zend_inline_hash_func(str, len);
  1349. nIndex = h | ht->nTableMask;
  1350. idx = HT_HASH(ht, nIndex);
  1351. while (idx != HT_INVALID_IDX) {
  1352. p = HT_HASH_TO_BUCKET(ht, idx);
  1353. if ((p->h == h)
  1354. && p->key
  1355. && (ZSTR_LEN(p->key) == len)
  1356. && !memcmp(ZSTR_VAL(p->key), str, len)) {
  1357. _zend_hash_del_el_ex(ht, idx, p, prev);
  1358. return SUCCESS;
  1359. }
  1360. prev = p;
  1361. idx = Z_NEXT(p->val);
  1362. }
  1363. return FAILURE;
  1364. }
/* Delete the element with integer key h; SUCCESS if found and removed,
 * FAILURE otherwise.  Handles both packed and hash representations. */
ZEND_API zend_result ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		/* packed: the key is the array offset, no chain to walk */
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Destroy all elements of ht (running pDestructor when set, releasing
 * non-static keys) and free the bucket storage.  The HashTable struct
 * itself is not freed.  The four loop variants below are the same walk
 * specialized by "has destructor" x "static keys only" x "has holes". */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			/* mark the table inconsistent while destructors run */
			SET_INCONSISTENT(HT_IS_DESTROYING);

			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}

			SET_INCONSISTENT(HT_DESTROYED);
		} else {
			/* no destructor: only string keys need releasing */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		}
		zend_hash_iterators_remove(ht);
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* nothing was ever allocated */
		return;
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
/* Destroy a PHP array: detach it from the GC buffer, destroy all values
 * with the fast inline zval dtor, release keys, free the bucket storage
 * and finally the HashTable structure itself. */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = GC_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		p = ht->arData;
		end = p + ht->nNumUsed;
		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release_ex(p->key, 0);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release_ex(p->key, 0);
					}
				}
			} while (++p != end);
		}
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* nothing was ever allocated */
		goto free_ht;
	}
	SET_INCONSISTENT(HT_DESTROYED);
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	zend_hash_iterators_remove(ht);
	FREE_HASHTABLE(ht);
}
/* Remove all elements but keep the allocated storage so the table can
 * be reused; resets the size bookkeeping and the internal pointer. */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		} else {
			/* no destructor: only string keys need releasing */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			}
		}
		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	/* ZEND_LONG_MIN marks "no numeric key inserted yet" (see the
	 * HASH_ADD_NEXT handling in _zend_hash_index_add_or_update_i) */
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
/* Variant of zend_hash_clean() for symbol tables: values are destroyed
 * with the inlined i_zval_ptr_dtor() instead of going through
 * ht->pDestructor, and the hash heads are reset unconditionally
 * (NOTE(review): this path appears to assume the table is not packed —
 * confirm against callers). */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			/* Static/interned keys: only values need destruction. */
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			/* Holes present: skip IS_UNDEF slots. */
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
  1602. ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
  1603. {
  1604. uint32_t idx;
  1605. Bucket *p;
  1606. IS_CONSISTENT(ht);
  1607. HT_ASSERT_RC1(ht);
  1608. p = ht->arData;
  1609. for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
  1610. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1611. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1612. }
  1613. if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
  1614. pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
  1615. }
  1616. SET_INCONSISTENT(HT_DESTROYED);
  1617. }
  1618. ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
  1619. {
  1620. uint32_t idx;
  1621. Bucket *p;
  1622. IS_CONSISTENT(ht);
  1623. HT_ASSERT_RC1(ht);
  1624. idx = ht->nNumUsed;
  1625. p = ht->arData + ht->nNumUsed;
  1626. while (idx > 0) {
  1627. idx--;
  1628. p--;
  1629. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1630. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1631. }
  1632. if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
  1633. pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
  1634. }
  1635. SET_INCONSISTENT(HT_DESTROYED);
  1636. }
/* This is used to iterate over the elements of a hashtable and selectively
 * delete certain entries. apply_func() receives each element's data and
 * decides whether the entry should be deleted and/or whether iteration
 * should stop. The following return codes are possible:
 * ZEND_HASH_APPLY_KEEP   - keep the element and continue
 * ZEND_HASH_APPLY_STOP   - stop iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element; may be combined (OR-ed)
 *                          with ZEND_HASH_APPLY_STOP
 */
  1645. ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
  1646. {
  1647. uint32_t idx;
  1648. Bucket *p;
  1649. int result;
  1650. IS_CONSISTENT(ht);
  1651. for (idx = 0; idx < ht->nNumUsed; idx++) {
  1652. p = ht->arData + idx;
  1653. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1654. result = apply_func(&p->val);
  1655. if (result & ZEND_HASH_APPLY_REMOVE) {
  1656. HT_ASSERT_RC1(ht);
  1657. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1658. }
  1659. if (result & ZEND_HASH_APPLY_STOP) {
  1660. break;
  1661. }
  1662. }
  1663. }
  1664. ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
  1665. {
  1666. uint32_t idx;
  1667. Bucket *p;
  1668. int result;
  1669. IS_CONSISTENT(ht);
  1670. for (idx = 0; idx < ht->nNumUsed; idx++) {
  1671. p = ht->arData + idx;
  1672. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1673. result = apply_func(&p->val, argument);
  1674. if (result & ZEND_HASH_APPLY_REMOVE) {
  1675. HT_ASSERT_RC1(ht);
  1676. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1677. }
  1678. if (result & ZEND_HASH_APPLY_STOP) {
  1679. break;
  1680. }
  1681. }
  1682. }
  1683. ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
  1684. {
  1685. uint32_t idx;
  1686. Bucket *p;
  1687. va_list args;
  1688. zend_hash_key hash_key;
  1689. int result;
  1690. IS_CONSISTENT(ht);
  1691. for (idx = 0; idx < ht->nNumUsed; idx++) {
  1692. p = ht->arData + idx;
  1693. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1694. va_start(args, num_args);
  1695. hash_key.h = p->h;
  1696. hash_key.key = p->key;
  1697. result = apply_func(&p->val, num_args, args, &hash_key);
  1698. if (result & ZEND_HASH_APPLY_REMOVE) {
  1699. HT_ASSERT_RC1(ht);
  1700. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1701. }
  1702. if (result & ZEND_HASH_APPLY_STOP) {
  1703. va_end(args);
  1704. break;
  1705. }
  1706. va_end(args);
  1707. }
  1708. }
  1709. ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
  1710. {
  1711. uint32_t idx;
  1712. Bucket *p;
  1713. int result;
  1714. IS_CONSISTENT(ht);
  1715. idx = ht->nNumUsed;
  1716. while (idx > 0) {
  1717. idx--;
  1718. p = ht->arData + idx;
  1719. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1720. result = apply_func(&p->val);
  1721. if (result & ZEND_HASH_APPLY_REMOVE) {
  1722. HT_ASSERT_RC1(ht);
  1723. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1724. }
  1725. if (result & ZEND_HASH_APPLY_STOP) {
  1726. break;
  1727. }
  1728. }
  1729. }
  1730. ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
  1731. {
  1732. uint32_t idx;
  1733. Bucket *p;
  1734. zval *new_entry, *data;
  1735. IS_CONSISTENT(source);
  1736. IS_CONSISTENT(target);
  1737. HT_ASSERT_RC1(target);
  1738. for (idx = 0; idx < source->nNumUsed; idx++) {
  1739. p = source->arData + idx;
  1740. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1741. /* INDIRECT element may point to UNDEF-ined slots */
  1742. data = &p->val;
  1743. if (Z_TYPE_P(data) == IS_INDIRECT) {
  1744. data = Z_INDIRECT_P(data);
  1745. if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
  1746. continue;
  1747. }
  1748. }
  1749. if (p->key) {
  1750. new_entry = zend_hash_update(target, p->key, data);
  1751. } else {
  1752. new_entry = zend_hash_index_update(target, p->h, data);
  1753. }
  1754. if (pCopyConstructor) {
  1755. pCopyConstructor(new_entry);
  1756. }
  1757. }
  1758. }
/* Copy a single bucket from source to target during array duplication.
 * Returns 1 if the element was copied, 0 if the slot was a hole (UNDEF)
 * and nothing was written.
 *
 * The packed/static_keys/with_holes flags are compile-time constants at
 * each call site (the function is always-inline), so every caller gets a
 * specialized version with dead branches removed. */
static zend_always_inline bool zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, bool packed, bool static_keys, bool with_holes)
{
	zval *data = &p->val;

	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			/* A reference with refcount 1 is unwrapped into its value,
			 * unless it is a self-reference to the array being copied
			 * (which must stay a reference to preserve the cycle). */
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
			    (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
			     Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break;
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(&q->val, data);

	q->h = p->h;
	if (packed) {
		/* Packed buckets carry no string key. */
		q->key = NULL;
	} else {
		uint32_t nIndex;

		q->key = p->key;
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}
		/* Link the new bucket into the target's collision chain. */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
  1807. static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, bool with_holes)
  1808. {
  1809. Bucket *p = source->arData;
  1810. Bucket *q = target->arData;
  1811. Bucket *end = p + source->nNumUsed;
  1812. do {
  1813. if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
  1814. if (with_holes) {
  1815. ZVAL_UNDEF(&q->val);
  1816. }
  1817. }
  1818. p++; q++;
  1819. } while (p != end);
  1820. }
/* Duplicate the bucket array of a non-packed table, compacting holes away.
 * Returns the number of buckets written to target.
 *
 * Fast path: while no hole has been seen, source index and target index
 * advance in lockstep. On the first hole the loop switches to a slow path
 * that tracks a separate target_idx and remaps the internal pointer to the
 * compacted position. */
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, bool static_keys, bool with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			/* First hole found: continue with decoupled indices. */
			uint32_t target_idx = idx;

			idx++; p++;
			while (p != end) {
				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
					if (source->nInternalPointer == idx) {
						/* Internal pointer follows the element to its
						 * compacted slot. */
						target->nInternalPointer = target_idx;
					}
					target_idx++; q++;
				}
				idx++; p++;
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx;
}
/* Create a new HashTable that is a (shallow-per-element) duplicate of
 * source. Four layouts are handled separately:
 *   1. empty source            -> fresh uninitialized table;
 *   2. immutable source        -> raw memcpy of the whole data block;
 *   3. packed source           -> bucket-array copy, holes preserved;
 *   4. regular hash source     -> element copy with hole compaction.
 * The returned table always uses ZVAL_PTR_DTOR and refcount 1. */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = GC_ARRAY;

	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* Empty: share the global uninitialized bucket, allocate nothing. */
		HT_FLAGS(target) = HASH_FLAG_UNINITIALIZED;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		target->nTableSize = HT_MIN_SIZE;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* Immutable: contents can be copied verbatim — no refcounted
		 * payloads need individual handling. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		target->nInternalPointer = source->nInternalPointer;
		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
	} else if (HT_FLAGS(source) & HASH_FLAG_PACKED) {
		/* Packed: copy buckets positionally; holes stay as UNDEF slots. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;

		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		/* Regular hash: rebuild collision chains; holes are compacted,
		 * so the final used count comes back from the copy loop. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;
		target->nTableSize = source->nTableSize;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
  1919. ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, bool overwrite)
  1920. {
  1921. uint32_t idx;
  1922. Bucket *p;
  1923. zval *t, *s;
  1924. IS_CONSISTENT(source);
  1925. IS_CONSISTENT(target);
  1926. HT_ASSERT_RC1(target);
  1927. if (overwrite) {
  1928. for (idx = 0; idx < source->nNumUsed; idx++) {
  1929. p = source->arData + idx;
  1930. s = &p->val;
  1931. if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
  1932. s = Z_INDIRECT_P(s);
  1933. }
  1934. if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
  1935. continue;
  1936. }
  1937. if (p->key) {
  1938. t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
  1939. if (pCopyConstructor) {
  1940. pCopyConstructor(t);
  1941. }
  1942. } else {
  1943. t = zend_hash_index_update(target, p->h, s);
  1944. if (pCopyConstructor) {
  1945. pCopyConstructor(t);
  1946. }
  1947. }
  1948. }
  1949. } else {
  1950. for (idx = 0; idx < source->nNumUsed; idx++) {
  1951. p = source->arData + idx;
  1952. s = &p->val;
  1953. if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
  1954. s = Z_INDIRECT_P(s);
  1955. }
  1956. if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
  1957. continue;
  1958. }
  1959. if (p->key) {
  1960. t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
  1961. if (t && pCopyConstructor) {
  1962. pCopyConstructor(t);
  1963. }
  1964. } else {
  1965. t = zend_hash_index_add(target, p->h, s);
  1966. if (t && pCopyConstructor) {
  1967. pCopyConstructor(t);
  1968. }
  1969. }
  1970. }
  1971. }
  1972. }
  1973. static bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
  1974. {
  1975. zend_hash_key hash_key;
  1976. hash_key.h = p->h;
  1977. hash_key.key = p->key;
  1978. return merge_checker_func(target, source_data, &hash_key, pParam);
  1979. }
  1980. ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
  1981. {
  1982. uint32_t idx;
  1983. Bucket *p;
  1984. zval *t;
  1985. IS_CONSISTENT(source);
  1986. IS_CONSISTENT(target);
  1987. HT_ASSERT_RC1(target);
  1988. for (idx = 0; idx < source->nNumUsed; idx++) {
  1989. p = source->arData + idx;
  1990. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1991. if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
  1992. t = zend_hash_update(target, p->key, &p->val);
  1993. if (pCopyConstructor) {
  1994. pCopyConstructor(t);
  1995. }
  1996. }
  1997. }
  1998. }
  1999. /* Returns the hash table data if found and NULL if not. */
  2000. ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
  2001. {
  2002. Bucket *p;
  2003. IS_CONSISTENT(ht);
  2004. p = zend_hash_find_bucket(ht, key, 0);
  2005. return p ? &p->val : NULL;
  2006. }
  2007. ZEND_API zval* ZEND_FASTCALL zend_hash_find_known_hash(const HashTable *ht, zend_string *key)
  2008. {
  2009. Bucket *p;
  2010. IS_CONSISTENT(ht);
  2011. p = zend_hash_find_bucket(ht, key, 1);
  2012. return p ? &p->val : NULL;
  2013. }
  2014. ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
  2015. {
  2016. zend_ulong h;
  2017. Bucket *p;
  2018. IS_CONSISTENT(ht);
  2019. h = zend_inline_hash_func(str, len);
  2020. p = zend_hash_str_find_bucket(ht, str, len, h);
  2021. return p ? &p->val : NULL;
  2022. }
  2023. ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
  2024. {
  2025. Bucket *p;
  2026. IS_CONSISTENT(ht);
  2027. if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
  2028. if (h < ht->nNumUsed) {
  2029. p = ht->arData + h;
  2030. if (Z_TYPE(p->val) != IS_UNDEF) {
  2031. return &p->val;
  2032. }
  2033. }
  2034. return NULL;
  2035. }
  2036. p = zend_hash_index_find_bucket(ht, h);
  2037. return p ? &p->val : NULL;
  2038. }
  2039. ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
  2040. {
  2041. Bucket *p;
  2042. IS_CONSISTENT(ht);
  2043. p = zend_hash_index_find_bucket(ht, h);
  2044. return p ? &p->val : NULL;
  2045. }
/* Set *pos to the position of the first live element (or the end sentinel
 * if the table is empty). The assert only requires refcount 1 when pos is
 * the table's own internal pointer. */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
	IS_CONSISTENT(ht);
	HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
	*pos = _zend_hash_get_valid_pos(ht, 0);
}
/* This function could be made much faster by remembering (caching) the
 * position of the end of the list.
 */
  2055. ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
  2056. {
  2057. uint32_t idx;
  2058. IS_CONSISTENT(ht);
  2059. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  2060. idx = ht->nNumUsed;
  2061. while (idx > 0) {
  2062. idx--;
  2063. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
  2064. *pos = idx;
  2065. return;
  2066. }
  2067. }
  2068. *pos = ht->nNumUsed;
  2069. }
  2070. ZEND_API zend_result ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
  2071. {
  2072. uint32_t idx;
  2073. IS_CONSISTENT(ht);
  2074. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  2075. idx = _zend_hash_get_valid_pos(ht, *pos);
  2076. if (idx < ht->nNumUsed) {
  2077. while (1) {
  2078. idx++;
  2079. if (idx >= ht->nNumUsed) {
  2080. *pos = ht->nNumUsed;
  2081. return SUCCESS;
  2082. }
  2083. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
  2084. *pos = idx;
  2085. return SUCCESS;
  2086. }
  2087. }
  2088. } else {
  2089. return FAILURE;
  2090. }
  2091. }
  2092. ZEND_API zend_result ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
  2093. {
  2094. uint32_t idx = *pos;
  2095. IS_CONSISTENT(ht);
  2096. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  2097. if (idx < ht->nNumUsed) {
  2098. while (idx > 0) {
  2099. idx--;
  2100. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
  2101. *pos = idx;
  2102. return SUCCESS;
  2103. }
  2104. }
  2105. *pos = ht->nNumUsed;
  2106. return SUCCESS;
  2107. } else {
  2108. return FAILURE;
  2109. }
  2110. }
  2111. /* This function should be made binary safe */
  2112. ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
  2113. {
  2114. uint32_t idx;
  2115. Bucket *p;
  2116. IS_CONSISTENT(ht);
  2117. idx = _zend_hash_get_valid_pos(ht, *pos);
  2118. if (idx < ht->nNumUsed) {
  2119. p = ht->arData + idx;
  2120. if (p->key) {
  2121. *str_index = p->key;
  2122. return HASH_KEY_IS_STRING;
  2123. } else {
  2124. *num_index = p->h;
  2125. return HASH_KEY_IS_LONG;
  2126. }
  2127. }
  2128. return HASH_KEY_NON_EXISTENT;
  2129. }
  2130. ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
  2131. {
  2132. uint32_t idx;
  2133. Bucket *p;
  2134. IS_CONSISTENT(ht);
  2135. idx = _zend_hash_get_valid_pos(ht, *pos);
  2136. if (idx >= ht->nNumUsed) {
  2137. ZVAL_NULL(key);
  2138. } else {
  2139. p = ht->arData + idx;
  2140. if (p->key) {
  2141. ZVAL_STR_COPY(key, p->key);
  2142. } else {
  2143. ZVAL_LONG(key, p->h);
  2144. }
  2145. }
  2146. }
  2147. ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
  2148. {
  2149. uint32_t idx;
  2150. Bucket *p;
  2151. IS_CONSISTENT(ht);
  2152. idx = _zend_hash_get_valid_pos(ht, *pos);
  2153. if (idx < ht->nNumUsed) {
  2154. p = ht->arData + idx;
  2155. if (p->key) {
  2156. return HASH_KEY_IS_STRING;
  2157. } else {
  2158. return HASH_KEY_IS_LONG;
  2159. }
  2160. }
  2161. return HASH_KEY_NON_EXISTENT;
  2162. }
  2163. ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
  2164. {
  2165. uint32_t idx;
  2166. Bucket *p;
  2167. IS_CONSISTENT(ht);
  2168. idx = _zend_hash_get_valid_pos(ht, *pos);
  2169. if (idx < ht->nNumUsed) {
  2170. p = ht->arData + idx;
  2171. return &p->val;
  2172. } else {
  2173. return NULL;
  2174. }
  2175. }
  2176. ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
  2177. {
  2178. zval val;
  2179. zend_ulong h;
  2180. zend_string *key;
  2181. val = p->val;
  2182. h = p->h;
  2183. key = p->key;
  2184. p->val = q->val;
  2185. p->h = q->h;
  2186. p->key = q->key;
  2187. q->val = val;
  2188. q->h = h;
  2189. q->key = key;
  2190. }
  2191. ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
  2192. {
  2193. zval val;
  2194. val = p->val;
  2195. p->val = q->val;
  2196. q->val = val;
  2197. }
  2198. ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
  2199. {
  2200. zval val;
  2201. zend_ulong h;
  2202. val = p->val;
  2203. h = p->h;
  2204. p->val = q->val;
  2205. p->h = q->h;
  2206. q->val = val;
  2207. q->h = h;
  2208. }
/* Sort the table in place using the supplied sort routine and bucket
 * comparator. When renumber is true, keys are discarded and elements are
 * re-indexed 0..n-1 afterwards (the table is converted to packed layout).
 * Stability is achieved by stashing each element's original position in
 * Z_EXTRA before sorting. */
ZEND_API void ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, bucket_compare_func_t compar, bool renumber)
{
	Bucket *p;
	uint32_t i, j;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* Sorting is a no-op for 0/1 elements, unless renumbering was asked
	 * for (which still needs to rewrite the single key). */
	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
		/* Doesn't require sorting */
		return;
	}

	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* Store original order of elements in extra space to allow stable sorting. */
		for (i = 0; i < ht->nNumUsed; i++) {
			Z_EXTRA(ht->arData[i].val) = i;
		}
	} else {
		/* Remove holes and store original order. */
		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
			p = ht->arData + j;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			if (i != j) {
				ht->arData[i] = *p;
			}
			Z_EXTRA(ht->arData[i].val) = i;
			i++;
		}
		ht->nNumUsed = i;
	}

	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		/* We broke the hash collision chains overriding Z_NEXT() by Z_EXTRA().
		 * Reset the hash headers table as well to avoid possible inconsistent
		 * access on recursive data structures.
		 *
		 * See Zend/tests/bug63882_2.phpt
		 */
		HT_HASH_RESET(ht);
	}

	sort((void *)ht->arData, ht->nNumUsed, sizeof(Bucket), (compare_func_t) compar,
			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
				((HT_FLAGS(ht) & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));

	ht->nInternalPointer = 0;

	if (renumber) {
		/* Rewrite all keys as consecutive integers 0..i-1. */
		for (j = 0; j < i; j++) {
			p = ht->arData + j;
			p->h = j;
			if (p->key) {
				zend_string_release(p->key);
				p->key = NULL;
			}
		}

		ht->nNextFreeElement = i;
	}
	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (!renumber) {
			/* A sorted packed table's offsets no longer match keys. */
			zend_hash_packed_to_hash(ht);
		}
	} else {
		if (renumber) {
			/* Keys are now 0..n-1: shrink to packed layout. */
			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
			Bucket *old_buckets = ht->arData;

			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
			ht->nTableMask = HT_MIN_MASK;
			HT_SET_DATA_ADDR(ht, new_data);
			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
			HT_HASH_RESET_PACKED(ht);
		} else {
			zend_hash_rehash(ht);
		}
	}
}
/* Compare two hash tables element by element. Returns 0 if equal, or a
 * positive/negative value indicating the ordering of the first difference.
 * Tables of different sizes compare by element count first.
 *
 * When ordered is true, elements are compared positionally (keys must
 * match pairwise in iteration order); otherwise each element of ht1 is
 * looked up by key in ht2. */
static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered) {
	uint32_t idx1, idx2;

	if (ht1->nNumOfElements != ht2->nNumOfElements) {
		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
	}

	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
		Bucket *p1 = ht1->arData + idx1, *p2;
		zval *pData1, *pData2;
		int result;

		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
		if (ordered) {
			/* Advance the ht2 cursor to its next live element; equal
			 * element counts guarantee one exists. */
			while (1) {
				ZEND_ASSERT(idx2 != ht2->nNumUsed);
				p2 = ht2->arData + idx2;
				if (Z_TYPE(p2->val) != IS_UNDEF) break;
				idx2++;
			}
			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
				if (p1->h != p2->h) {
					return p1->h > p2->h ? 1 : -1;
				}
			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
				}

				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
				if (result != 0) {
					return result;
				}
			} else {
				/* Mixed key types: A string key is considered as larger */
				return p1->key != NULL ? 1 : -1;
			}
			pData2 = &p2->val;
			idx2++;
		} else {
			/* Unordered: find ht1's key in ht2; a missing key makes
			 * ht1 compare as larger. */
			if (p1->key == NULL) { /* numeric index */
				pData2 = zend_hash_index_find(ht2, p1->h);
				if (pData2 == NULL) {
					return 1;
				}
			} else { /* string index */
				pData2 = zend_hash_find(ht2, p1->key);
				if (pData2 == NULL) {
					return 1;
				}
			}
		}

		pData1 = &p1->val;
		/* Dereference INDIRECT slots before comparing values. */
		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
			pData1 = Z_INDIRECT_P(pData1);
		}
		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
			pData2 = Z_INDIRECT_P(pData2);
		}

		/* An UNDEF value compares as smaller than any defined value. */
		if (Z_TYPE_P(pData1) == IS_UNDEF) {
			if (Z_TYPE_P(pData2) != IS_UNDEF) {
				return -1;
			}
		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
			return 1;
		} else {
			result = compar(pData1, pData2);
			if (result != 0) {
				return result;
			}
		}
	}

	return 0;
}
  2351. ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, bool ordered)
  2352. {
  2353. int result;
  2354. IS_CONSISTENT(ht1);
  2355. IS_CONSISTENT(ht2);
  2356. if (ht1 == ht2) {
  2357. return 0;
  2358. }
  2359. /* It's enough to protect only one of the arrays.
  2360. * The second one may be referenced from the first and this may cause
  2361. * false recursion detection.
  2362. */
  2363. if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
  2364. zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
  2365. }
  2366. GC_TRY_PROTECT_RECURSION(ht1);
  2367. result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
  2368. GC_TRY_UNPROTECT_RECURSION(ht1);
  2369. return result;
  2370. }
  2371. ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, bucket_compare_func_t compar, uint32_t flag)
  2372. {
  2373. uint32_t idx;
  2374. Bucket *p, *res;
  2375. IS_CONSISTENT(ht);
  2376. if (ht->nNumOfElements == 0 ) {
  2377. return NULL;
  2378. }
  2379. idx = 0;
  2380. while (1) {
  2381. if (idx == ht->nNumUsed) {
  2382. return NULL;
  2383. }
  2384. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
  2385. idx++;
  2386. }
  2387. res = ht->arData + idx;
  2388. for (; idx < ht->nNumUsed; idx++) {
  2389. p = ht->arData + idx;
  2390. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  2391. if (flag) {
  2392. if (compar(res, p) < 0) { /* max */
  2393. res = p;
  2394. }
  2395. } else {
  2396. if (compar(res, p) > 0) { /* min */
  2397. res = p;
  2398. }
  2399. }
  2400. }
  2401. return &res->val;
  2402. }
/* Decide whether a string key is a canonical decimal integer and, if so,
 * store its numeric value in *idx. Returns 1 on success, 0 when the string
 * is not a valid canonical integer (leading zeros, too long, overflow, or
 * non-digit characters). Only canonical forms qualify, so "08" or "1e2"
 * stay string keys. */
ZEND_API bool ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
	const char *tmp = key;
	const char *end = key + length;

	if (*tmp == '-') {
		tmp++;
	}

	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
	 || (SIZEOF_ZEND_LONG == 4 &&
			end - tmp == MAX_LENGTH_OF_LONG - 1 &&
			*tmp > '2')) { /* overflow */
		return 0;
	}
	*idx = (*tmp - '0');
	while (1) {
		++tmp;
		if (tmp == end) {
			if (*key == '-') {
				/* Negative range check: |value| may be at most
				 * ZEND_LONG_MAX + 1 (== -ZEND_LONG_MIN), hence the
				 * "- 1" before comparing. */
				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
					return 0;
				}
				/* Negate in unsigned arithmetic (well-defined even for
				 * ZEND_LONG_MIN). */
				*idx = 0 - *idx;
			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
				return 0;
			}
			return 1;
		}
		if (*tmp <= '9' && *tmp >= '0') {
			*idx = (*idx * 10) + (*tmp - '0');
		} else {
			return 0;
		}
	}
}
  2438. /* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
  2439. * and converts it to a "proptable" (contains only string keys).
  2440. * If the symtable didn't need duplicating, its refcount is incremented.
  2441. */
ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	/* Packed tables have only numeric keys, so conversion is mandatory. */
	if (UNEXPECTED(HT_IS_PACKED(ht))) {
		goto convert;
	}

	/* If every key is already a string, the table can be shared as-is. */
	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
		if (!str_key) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	/* Immutable arrays are not refcounted. */
	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
			if (!str_key) {
				/* Numeric key: materialize it as a string. The delref
				 * pre-compensates for the addref done by the update
				 * below, so the new table ends up as sole owner. */
				str_key = zend_long_to_str(num_key);
				zend_string_delref(str_key);
			}
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* Unwrap references with a single holder. */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv);
				}
			} while (0);
			zend_hash_update(new_ht, str_key, zv);
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
  2483. /* Takes a "proptable" hashtable (contains only string keys) and converts it to
  2484. * a "symtable" (contains integer and non-numeric string keys).
  2485. * If the proptable didn't need duplicating, its refcount is incremented.
  2486. */
ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, bool always_duplicate)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	/* Conversion is only needed if some string key is numeric. */
	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
		/* The `str_key &&` here might seem redundant: property tables should
		 * only have string keys. Unfortunately, this isn't true, at the very
		 * least because of ArrayObject, which stores a symtable where the
		 * property table should be.
		 */
		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	if (always_duplicate) {
		return zend_array_dup(ht);
	}

	/* Immutable arrays are not refcounted. */
	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* Unwrap references with a single holder. */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv);
				}
			} while (0);
			/* Again, thank ArrayObject for `!str_key ||`. */
			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
				zend_hash_index_update(new_ht, num_key, zv);
			} else {
				zend_hash_update(new_ht, str_key, zv);
			}
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}