/* zend_hash.c — Zend Engine hash table implementation (extracted chunk). */
  1. /*
  2. +----------------------------------------------------------------------+
  3. | Zend Engine |
  4. +----------------------------------------------------------------------+
  5. | Copyright (c) 1998-2018 Zend Technologies Ltd. (http://www.zend.com) |
  6. +----------------------------------------------------------------------+
  7. | This source file is subject to version 2.00 of the Zend license, |
  8. | that is bundled with this package in the file LICENSE, and is |
  9. | available through the world-wide-web at the following url: |
  10. | http://www.zend.com/license/2_00.txt. |
  11. | If you did not receive a copy of the Zend license and are unable to |
  12. | obtain it through the world-wide-web, please send a note to |
  13. | license@zend.com so we can mail you a copy immediately. |
  14. +----------------------------------------------------------------------+
  15. | Authors: Andi Gutmans <andi@php.net> |
  16. | Zeev Suraski <zeev@php.net> |
  17. | Dmitry Stogov <dmitry@php.net> |
  18. +----------------------------------------------------------------------+
  19. */
  20. #include "zend.h"
  21. #include "zend_globals.h"
  22. #include "zend_variables.h"
  23. #ifdef __SSE2__
  24. # include <mmintrin.h>
  25. # include <emmintrin.h>
  26. #endif
#if ZEND_DEBUG
/* In debug builds: assert expr, unless this table explicitly opted in to
 * copy-on-write violations. No-op in release builds. */
# define HT_ASSERT(ht, expr) \
	ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
#else
# define HT_ASSERT(ht, expr)
#endif

/* Assert the table is not shared (refcount == 1) before in-place mutation. */
#define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)

/* Sentinel stored into iterators whose underlying table has been destroyed. */
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)

#if ZEND_DEBUG

/* Consistency states kept in the HASH_FLAG_CONSISTENCY bits of HT_FLAGS. */
#define HT_OK					0x00
#define HT_IS_DESTROYING		0x01
#define HT_DESTROYED			0x02
#define HT_CLEANING				0x03

/* Report a table that is not in the HT_OK state and assert. Invoked through
 * the IS_CONSISTENT() macro so file/line point at the caller. */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
	if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
		return;
	}
	switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
		case HT_IS_DESTROYING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
			break;
		case HT_DESTROYED:
			zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
			break;
		case HT_CLEANING:
			zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
			break;
		default:
			zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
			break;
	}
	/* Unconditional failure: reaching here means the table was misused. */
	ZEND_ASSERT(0);
}
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);

/* Record a new consistency state in the table's flag bits. */
#define SET_INCONSISTENT(n) do { \
		HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
	} while (0)
#else
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif

/* Grow when every slot (live or tombstoned) has been consumed. */
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht)				\
	if ((ht)->nNumUsed >= (ht)->nTableSize) {		\
		zend_hash_do_resize(ht);					\
	}

static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
/* Round a requested element count up to a power of two in
 * [HT_MIN_SIZE, HT_MAX_SIZE). Aborts with E_ERROR when the request would
 * overflow the allocator size computation. */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#if defined(ZEND_WIN32)
	unsigned long index;
#endif

	/* Use big enough power of 2 */
	/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
	if (nSize <= HT_MIN_SIZE) {
		return HT_MIN_SIZE;
	} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
	}

#if defined(ZEND_WIN32)
	if (BitScanReverse(&index, nSize - 1)) {
		/* 2 << floor(log2(nSize - 1)) == smallest power of two >= nSize. */
		return 0x2 << ((31 - index) ^ 0x1f);
	} else {
		/* nSize is ensured to be in the valid range, fall back to it
		   rather than using an undefined bis scan result. */
		return nSize;
	}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
	/* Same next-power-of-two computation via count-leading-zeros. */
	return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
	/* Portable fallback: smear the highest set bit downwards, then add one
	 * to reach the next power of two. */
	nSize -= 1;
	nSize |= (nSize >> 1);
	nSize |= (nSize >> 2);
	nSize |= (nSize >> 4);
	nSize |= (nSize >> 8);
	nSize |= (nSize >> 16);
	return nSize + 1;
#endif
}
/* Allocate backing storage for a packed (vector-like) table: the bucket
 * array plus the minimal two-slot hash stub implied by HT_MIN_MASK. */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
	HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
	HT_FLAGS(ht) |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
	HT_HASH_RESET_PACKED(ht);
}
/* Allocate backing storage for a mixed (real hash) table and reset every
 * hash slot to HT_INVALID_IDX (-1). The common minimum-size case (16 slots)
 * is fully unrolled, with an SSE2 path that writes 0xFF bytes 16 at a time. */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
	uint32_t nSize = ht->nTableSize;

	ht->nTableMask = HT_SIZE_TO_MASK(nSize);
	HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
	HT_FLAGS(ht) |= HASH_FLAG_INITIALIZED;
	if (EXPECTED(ht->nTableMask == HT_SIZE_TO_MASK(HT_MIN_SIZE))) {
		Bucket *arData = ht->arData;
#ifdef __SSE2__
		/* cmpeq(x, x) yields all-ones: four -1 (HT_INVALID_IDX) slots per store. */
		__m128i xmm0 = _mm_setzero_si128();
		xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -16), xmm0);
		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -12), xmm0);
		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -8), xmm0);
		_mm_storeu_si128((__m128i*)&HT_HASH_EX(arData, -4), xmm0);
#else
		HT_HASH_EX(arData, -16) = -1;
		HT_HASH_EX(arData, -15) = -1;
		HT_HASH_EX(arData, -14) = -1;
		HT_HASH_EX(arData, -13) = -1;
		HT_HASH_EX(arData, -12) = -1;
		HT_HASH_EX(arData, -11) = -1;
		HT_HASH_EX(arData, -10) = -1;
		HT_HASH_EX(arData, -9) = -1;
		HT_HASH_EX(arData, -8) = -1;
		HT_HASH_EX(arData, -7) = -1;
		HT_HASH_EX(arData, -6) = -1;
		HT_HASH_EX(arData, -5) = -1;
		HT_HASH_EX(arData, -4) = -1;
		HT_HASH_EX(arData, -3) = -1;
		HT_HASH_EX(arData, -2) = -1;
		HT_HASH_EX(arData, -1) = -1;
#endif
	} else {
		/* Larger tables: generic memset-style reset of the hash area. */
		HT_HASH_RESET(ht);
	}
}
  149. static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
  150. {
  151. HT_ASSERT_RC1(ht);
  152. ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED));
  153. if (packed) {
  154. zend_hash_real_init_packed_ex(ht);
  155. } else {
  156. zend_hash_real_init_mixed_ex(ht);
  157. }
  158. }
/* Shared read-only hash area for uninitialized tables: two HT_INVALID_IDX
 * slots (matching HT_MIN_MASK), so lookups against an empty table miss
 * safely without allocating. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
	{HT_INVALID_IDX, HT_INVALID_IDX};

/* Process-wide immutable empty array. refcount 2 plus GC_IMMUTABLE keeps it
 * from ever being freed or mutated in place. */
ZEND_API const HashTable zend_empty_array = {
	.gc.refcount = 2,
	.gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
	.u.flags = HASH_FLAG_STATIC_KEYS,
	.nTableMask = HT_MIN_MASK,
	/* arData points past the stub hash slots, as for real tables. */
	.arData = (Bucket*)&uninitialized_bucket[2],
	.nNumUsed = 0,
	.nNumOfElements = 0,
	.nTableSize = HT_MIN_SIZE,
	.nInternalPointer = 0,
	.nNextFreeElement = 0,
	.pDestructor = ZVAL_PTR_DTOR
};
/* Common initializer: set up an empty, lazily-allocated table. The real
 * bucket storage is deferred until the first insert (arData points at the
 * shared uninitialized_bucket stub until then). */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
{
	GC_SET_REFCOUNT(ht, 1);
	GC_TYPE_INFO(ht) = IS_ARRAY | (persistent ? (GC_PERSISTENT << GC_FLAGS_SHIFT) : (GC_COLLECTABLE << GC_FLAGS_SHIFT));
	HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nInternalPointer = 0;
	ht->nNextFreeElement = 0;
	ht->pDestructor = pDestructor;
	/* Remember the rounded size so the eventual real allocation fits nSize. */
	ht->nTableSize = zend_hash_check_size(nSize);
}
/* Public entry point: initialize a caller-provided HashTable. */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
{
	_zend_hash_init_int(ht, nSize, pDestructor, persistent);
}
/* Allocate a new minimum-size, request-lifetime array (fast path used for
 * array literals; skips the size rounding of _zend_new_array). */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
	return ht;
}
/* Allocate a new request-lifetime array sized for at least nSize elements. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
{
	HashTable *ht = emalloc(sizeof(HashTable));
	_zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
	return ht;
}
/* Double the capacity of a packed table in place, erroring out rather than
 * overflowing when the doubled size would exceed HT_MAX_SIZE. */
static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
	HT_ASSERT_RC1(ht);
	if (ht->nTableSize >= HT_MAX_SIZE) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
	}
	ht->nTableSize += ht->nTableSize;
	/* perealloc2 copies only HT_USED_SIZE(ht) bytes of live data. */
	HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
}
/* Public wrapper: allocate storage for an initialized-but-empty table,
 * choosing packed or mixed layout. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_hash_real_init_ex(ht, packed);
}
/* Public wrapper: allocate packed-layout storage directly. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_hash_real_init_packed_ex(ht);
}
/* Public wrapper: allocate mixed (real hash) storage directly. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	zend_hash_real_init_mixed_ex(ht);
}
/* Convert a packed table to mixed layout: allocate a full hash area, move
 * the buckets over unchanged, free the old block, then rebuild the
 * collision chains via rehash. */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *old_buckets = ht->arData;
	uint32_t nSize = ht->nTableSize;

	HT_ASSERT_RC1(ht);
	HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
	new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
	HT_SET_DATA_ADDR(ht, new_data);
	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	/* Hash slots are uninitialized until here; rehash fills them. */
	zend_hash_rehash(ht);
}
/* Convert a mixed table to packed layout. Caller must have verified the
 * table qualifies (dense, ascending integer keys); buckets are copied
 * verbatim and the hash area shrinks to the two-slot stub. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
	void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
	Bucket *old_buckets = ht->arData;

	HT_ASSERT_RC1(ht);
	new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	/* No string keys remain, so STATIC_KEYS can be set as well. */
	HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
	ht->nTableMask = HT_MIN_MASK;
	HT_SET_DATA_ADDR(ht, new_data);
	HT_HASH_RESET_PACKED(ht);
	memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
	pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
/* Pre-size a table to hold at least nSize elements, allocating storage if
 * the table is still lazy. `packed` must match the table's current layout
 * when storage already exists (asserted); growing a mixed table requires a
 * full reallocation plus rehash. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
{
	HT_ASSERT_RC1(ht);
	if (nSize == 0) return;
	if (UNEXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
		/* Storage not allocated yet: just record the size and allocate. */
		if (nSize > ht->nTableSize) {
			ht->nTableSize = zend_hash_check_size(nSize);
		}
		zend_hash_real_init(ht, packed);
	} else {
		if (packed) {
			ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
			if (nSize > ht->nTableSize) {
				/* Packed layout can grow with a plain realloc. */
				ht->nTableSize = zend_hash_check_size(nSize);
				HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			}
		} else {
			ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
			if (nSize > ht->nTableSize) {
				/* Mixed layout: new block, copy buckets, rebuild chains. */
				void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
				Bucket *old_buckets = ht->arData;
				nSize = zend_hash_check_size(nSize);
				ht->nTableSize = nSize;
				new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
				HT_SET_DATA_ADDR(ht, new_data);
				memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
				pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
				zend_hash_rehash(ht);
			}
		}
	}
}
/* Truncate the table back to its first nNumUsed buckets, unlinking each
 * discarded live bucket from its collision chain. Walks from the tail so
 * that each discarded bucket is still at the head of its chain (collision
 * pointers always run from higher to lower buckets). */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
	Bucket *p, *end, *arData;
	uint32_t nIndex;

	arData = ht->arData;
	p = arData + ht->nNumUsed;
	end = arData + nNumUsed;
	ht->nNumUsed = nNumUsed;
	while (p != end) {
		p--;
		/* Tombstones were never counted nor chained; skip them. */
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		ht->nNumOfElements--;
		/* Collision pointers always directed from higher to lower buckets */
		nIndex = p->h | ht->nTableMask;
		HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
	}
}
/* Count the elements of ht, excluding INDIRECT slots whose target is UNDEF
 * (unset symbol-table entries that still occupy a bucket). */
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
	zval *val;
	uint32_t num = ht->nNumOfElements;

	ZEND_HASH_FOREACH_VAL(ht, val) {
		if (Z_TYPE_P(val) == IS_INDIRECT) {
			if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
				num--;
			}
		}
	} ZEND_HASH_FOREACH_END();
	return num;
}
/* }}} */
/* Return the logical element count of ht. Tables flagged with empty
 * INDIRECT entries (and the global symbol table, which may contain them
 * without the flag) need a full recount; others use the cached counter. */
ZEND_API uint32_t zend_array_count(HashTable *ht)
{
	uint32_t num;
	if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
		num = zend_array_recalc_elements(ht);
		/* Recount matched the cache: no empty indirects remain, clear flag. */
		if (UNEXPECTED(ht->nNumOfElements == num)) {
			HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
		}
	} else if (UNEXPECTED(ht == &EG(symbol_table))) {
		num = zend_array_recalc_elements(ht);
	} else {
		num = zend_hash_num_elements(ht);
	}
	return num;
}
/* }}} */
/* Advance pos past tombstones (IS_UNDEF buckets) to the next live bucket,
 * or to nNumUsed when none remains. */
static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
{
	while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
		pos++;
	}
	return pos;
}
/* Internal pointer of ht, normalized to the next live bucket. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}
/* Public accessor for the normalized internal pointer. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
	return _zend_hash_get_current_pos(ht);
}
/* Register a new external iterator over ht at position pos, reusing a free
 * slot in EG(ht_iterators) when possible and otherwise growing the array by
 * 8 slots (copying out of the fixed inline buffer on first growth).
 * Returns the iterator's index. */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
	HashTableIterator *iter = EG(ht_iterators);
	HashTableIterator *end = iter + EG(ht_iterators_count);
	uint32_t idx;

	/* Saturating per-table iterator counter; once overflowed it is never
	 * decremented again. */
	if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
		HT_INC_ITERATORS_COUNT(ht);
	}
	/* First, try to reuse a released slot. */
	while (iter != end) {
		if (iter->ht == NULL) {
			iter->ht = ht;
			iter->pos = pos;
			idx = iter - EG(ht_iterators);
			if (idx + 1 > EG(ht_iterators_used)) {
				EG(ht_iterators_used) = idx + 1;
			}
			return idx;
		}
		iter++;
	}
	/* No free slot: grow by 8. The initial storage is the inline
	 * ht_iterators_slots buffer, which cannot be realloc'ed. */
	if (EG(ht_iterators) == EG(ht_iterators_slots)) {
		EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
		memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
	} else {
		EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
	}
	iter = EG(ht_iterators) + EG(ht_iterators_count);
	EG(ht_iterators_count) += 8;
	iter->ht = ht;
	iter->pos = pos;
	/* Zero the remaining 7 fresh slots so they read as free. */
	memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
	idx = iter - EG(ht_iterators);
	EG(ht_iterators_used) = idx + 1;
	return idx;
}
/* Return the position of iterator idx over ht, re-binding the iterator to
 * ht first if it currently points at a different (or destroyed) table. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (UNEXPECTED(iter->ht != ht)) {
		/* Release the claim on the old table (unless it is gone or its
		 * iterator counter has saturated). */
		if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
				&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
			HT_DEC_ITERATORS_COUNT(iter->ht);
		}
		if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
			HT_INC_ITERATORS_COUNT(ht);
		}
		iter->ht = ht;
		iter->pos = _zend_hash_get_current_pos(ht);
	}
	return iter->pos;
}
  405. ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
  406. {
  407. HashTable *ht = Z_ARRVAL_P(array);
  408. HashTableIterator *iter = EG(ht_iterators) + idx;
  409. ZEND_ASSERT(idx != (uint32_t)-1);
  410. if (UNEXPECTED(iter->ht != ht)) {
  411. if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
  412. && EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
  413. HT_DEC_ITERATORS_COUNT(iter->ht);
  414. }
  415. SEPARATE_ARRAY(array);
  416. ht = Z_ARRVAL_P(array);
  417. if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
  418. HT_INC_ITERATORS_COUNT(ht);
  419. }
  420. iter->ht = ht;
  421. iter->pos = _zend_hash_get_current_pos(ht);
  422. }
  423. return iter->pos;
  424. }
/* Release iterator idx: drop its claim on the table (if still valid and not
 * saturated) and free the slot, shrinking the used-slot high-water mark
 * when the tail slots become free. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
	HashTableIterator *iter = EG(ht_iterators) + idx;

	ZEND_ASSERT(idx != (uint32_t)-1);
	if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
			&& EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
		ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
		HT_DEC_ITERATORS_COUNT(iter->ht);
	}
	iter->ht = NULL;

	if (idx == EG(ht_iterators_used) - 1) {
		/* Trim trailing free slots so future scans stay short. */
		while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
			idx--;
		}
		EG(ht_iterators_used) = idx;
	}
}
  442. static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
  443. {
  444. HashTableIterator *iter = EG(ht_iterators);
  445. HashTableIterator *end = iter + EG(ht_iterators_used);
  446. while (iter != end) {
  447. if (iter->ht == ht) {
  448. iter->ht = HT_POISONED_PTR;
  449. }
  450. iter++;
  451. }
  452. }
/* Fast-path wrapper: only scan the iterator array when ht actually has
 * registered iterators. */
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
	if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		_zend_hash_iterators_remove(ht);
	}
}
  459. ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
  460. {
  461. HashTableIterator *iter = EG(ht_iterators);
  462. HashTableIterator *end = iter + EG(ht_iterators_used);
  463. HashPosition res = ht->nNumUsed;
  464. while (iter != end) {
  465. if (iter->ht == ht) {
  466. if (iter->pos >= start && iter->pos < res) {
  467. res = iter->pos;
  468. }
  469. }
  470. iter++;
  471. }
  472. return res;
  473. }
  474. ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
  475. {
  476. HashTableIterator *iter = EG(ht_iterators);
  477. HashTableIterator *end = iter + EG(ht_iterators_used);
  478. while (iter != end) {
  479. if (iter->ht == ht && iter->pos == from) {
  480. iter->pos = to;
  481. }
  482. iter++;
  483. }
  484. }
  485. ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
  486. {
  487. HashTableIterator *iter = EG(ht_iterators);
  488. HashTableIterator *end = iter + EG(ht_iterators_used);
  489. while (iter != end) {
  490. if (iter->ht == ht) {
  491. iter->pos += step;
  492. }
  493. iter++;
  494. }
  495. }
/* Look up a zend_string key. When known_hash is set the cached ZSTR_H(key)
 * is trusted; otherwise the hash is (lazily) computed. Walks the collision
 * chain, trying cheap pointer identity first (interned strings) before the
 * hash + length + content comparison. Returns the bucket or NULL. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, zend_bool known_hash)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	if (known_hash) {
		h = ZSTR_H(key);
	} else {
		h = zend_string_hash_val(key);
	}
	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);

	if (UNEXPECTED(idx == HT_INVALID_IDX)) {
		return NULL;
	}
	p = HT_HASH_TO_BUCKET_EX(arData, idx);
	if (EXPECTED(p->key == key)) { /* check for the same interned string */
		return p;
	}

	while (1) {
		/* p->key may be NULL for integer-keyed buckets in the same chain. */
		if (p->h == ZSTR_H(key) &&
		    EXPECTED(p->key) &&
		    zend_string_equal_content(p->key, key)) {
			return p;
		}
		idx = Z_NEXT(p->val);
		if (idx == HT_INVALID_IDX) {
			return NULL;
		}
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->key == key) { /* check for the same interned string */
			return p;
		}
	}
}
/* Look up a raw char*/length key with precomputed hash h. Walks the
 * collision chain comparing hash, length, then bytes. Returns the bucket
 * or NULL. */
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if ((p->h == h)
			 && p->key
			 && (ZSTR_LEN(p->key) == len)
			 && !memcmp(ZSTR_VAL(p->key), str, len)) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
/* Look up an integer key h. A matching bucket has the same h and a NULL
 * key pointer (string-keyed buckets share the chains). Returns the bucket
 * or NULL. */
static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	arData = ht->arData;
	nIndex = h | ht->nTableMask;
	idx = HT_HASH_EX(arData, nIndex);
	while (idx != HT_INVALID_IDX) {
		ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
		p = HT_HASH_TO_BUCKET_EX(arData, idx);
		if (p->h == h && !p->key) {
			return p;
		}
		idx = Z_NEXT(p->val);
	}
	return NULL;
}
/* Core insert/update for string keys. `flag` combines HASH_ADD,
 * HASH_UPDATE, HASH_ADD_NEW and HASH_UPDATE_INDIRECT and selects between
 * failing on an existing key, overwriting it, or writing through
 * IS_INDIRECT slots. Returns the stored zval, or NULL when HASH_ADD finds
 * the key already present (and not an empty indirect). Takes a reference
 * on non-interned keys. */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
		/* Lazy table: allocate mixed storage; it is empty, so the find can
		 * be skipped entirely. */
		zend_hash_real_init_mixed(ht);
		if (!ZSTR_IS_INTERNED(key)) {
			zend_string_addref(key);
			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
			zend_string_hash_val(key);
		}
		goto add_to_hash;
	} else if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		/* A string key forces conversion to mixed layout. */
		zend_hash_packed_to_hash(ht);
		if (!ZSTR_IS_INTERNED(key)) {
			zend_string_addref(key);
			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
			zend_string_hash_val(key);
		}
	} else if ((flag & HASH_ADD_NEW) == 0) {
		/* Caller did not guarantee the key is absent: look it up. */
		p = zend_hash_find_bucket(ht, key, 0);

		if (p) {
			zval *data;

			if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL;
				}
				/* ADD + UPDATE_INDIRECT: only an empty indirect slot may be
				 * written; anything else keeps ADD semantics and fails. */
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data);
				}
			}
			/* Destroy the old value before overwriting it. */
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
		if (!ZSTR_IS_INTERNED(key)) {
			zend_string_addref(key);
			HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
		}
	} else if (!ZSTR_IS_INTERNED(key)) {
		/* HASH_ADD_NEW: key known absent, just take a reference. */
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
		zend_string_hash_val(key);
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	/* Append a new bucket and link it at the head of its collision chain. */
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	arData = ht->arData;
	p = arData + idx;
	p->key = key;
	p->h = h = ZSTR_H(key);
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
	HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
	ZVAL_COPY_VALUE(&p->val, pData);

	return &p->val;
}
/* Core implementation behind zend_hash_str_add/update/update_ind/add_new:
 * same flag semantics as _zend_hash_add_or_update_i, but the key is a raw
 * (str, len) pair with precomputed hash h. On insertion a fresh zend_string
 * key is allocated (persistent iff the table is persistent), so the table
 * never holds static keys after this path. Returns the stored zval or NULL
 * when an add was refused. */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
	zend_string *key;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (UNEXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
		zend_hash_real_init_mixed(ht);
		goto add_to_hash;
	} else if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		/* string key forces conversion out of packed representation */
		zend_hash_packed_to_hash(ht);
	} else if ((flag & HASH_ADD_NEW) == 0) {
		p = zend_hash_str_find_bucket(ht, str, len, h);

		if (p) {
			zval *data;

			if (flag & HASH_ADD) {
				if (!(flag & HASH_UPDATE_INDIRECT)) {
					return NULL; /* plain add: key exists, refuse */
				}

				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if (Z_TYPE_P(data) == IS_INDIRECT) {
					/* add-indirect may only fill an UNDEF indirect slot */
					data = Z_INDIRECT_P(data);
					if (Z_TYPE_P(data) != IS_UNDEF) {
						return NULL;
					}
				} else {
					return NULL;
				}
			} else {
				ZEND_ASSERT(&p->val != pData);
				data = &p->val;
				if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
					data = Z_INDIRECT_P(data); /* update through the indirection */
				}
			}
			if (ht->pDestructor) {
				ht->pDestructor(data);
			}
			ZVAL_COPY_VALUE(data, pData);
			return data;
		}
	}

	ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */

add_to_hash:
	idx = ht->nNumUsed++;
	ht->nNumOfElements++;
	p = ht->arData + idx;
	/* materialize the key; reuse the already-computed hash value */
	p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	p->h = ZSTR_H(key) = h;
	HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	ZVAL_COPY_VALUE(&p->val, pData);
	nIndex = h | ht->nTableMask;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);

	return &p->val;
}
  708. ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
  709. {
  710. if (flag == HASH_ADD) {
  711. return zend_hash_add(ht, key, pData);
  712. } else if (flag == HASH_ADD_NEW) {
  713. return zend_hash_add_new(ht, key, pData);
  714. } else if (flag == HASH_UPDATE) {
  715. return zend_hash_update(ht, key, pData);
  716. } else {
  717. ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
  718. return zend_hash_update_ind(ht, key, pData);
  719. }
  720. }
/* Insert key => pData; fails (returns NULL) if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}
/* Insert or overwrite key => pData, destroying any previous value. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}
/* Like zend_hash_update, but writes through IS_INDIRECT slots instead of
 * replacing the indirection itself. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
/* Insert key => pData without checking for an existing entry; the caller
 * must guarantee the key is not already present. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
	return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
  737. ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
  738. {
  739. if (flag == HASH_ADD) {
  740. return zend_hash_str_add(ht, str, len, pData);
  741. } else if (flag == HASH_ADD_NEW) {
  742. return zend_hash_str_add_new(ht, str, len, pData);
  743. } else if (flag == HASH_UPDATE) {
  744. return zend_hash_str_update(ht, str, len, pData);
  745. } else {
  746. ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
  747. return zend_hash_str_update_ind(ht, str, len, pData);
  748. }
  749. }
/* Insert or overwrite a raw-string key; hashes the key here. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
}
/* Insert or overwrite a raw-string key, writing through IS_INDIRECT slots. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}
/* Insert a raw-string key; fails (returns NULL) if it already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
}
/* Insert a raw-string key without an existence check (caller guarantees
 * the key is new). */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
{
	zend_ulong h = zend_hash_func(str, len);

	return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
}
/* Add integer key h with a NULL value (placeholder entry); fails if the
 * key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_index_add(ht, h, &dummy);
}
/* Add string key with a NULL value (placeholder entry); fails if the key
 * already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_add(ht, key, &dummy);
}
/* Add raw-string key with a NULL value (placeholder entry); fails if the
 * key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_str_add(ht, str, len, &dummy);
}
/* Core implementation behind the integer-key add/update family. Handles
 * both representations:
 *  - packed tables (dense integer keys): appends in place, grows, or pads
 *    holes; converts to hash mode when the key would leave the table too
 *    sparse (roughly, when h doubled exceeds the size or fill is low);
 *  - hash tables: normal bucket lookup/insert with p->key == NULL.
 * Flags: HASH_ADD refuses existing keys, HASH_UPDATE overwrites them,
 * HASH_ADD_NEW skips the lookup, HASH_ADD_NEXT marks an append at
 * nNextFreeElement. Returns the stored zval or NULL when an add was
 * refused. Also advances nNextFreeElement past h (saturating at
 * ZEND_LONG_MAX). */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
replace:
				if (flag & HASH_ADD) {
					return NULL; /* add: slot taken, refuse */
				}
				if (ht->pDestructor) {
					ht->pDestructor(&p->val);
				}
				ZVAL_COPY_VALUE(&p->val, pData);
				return &p->val;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			p = ht->arData + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				if (h > ht->nNumUsed) {
					/* fill the gap between old end and h with UNDEF holes */
					Bucket *q = ht->arData + ht->nNumUsed;
					while (q != p) {
						ZVAL_UNDEF(&q->val);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			goto add;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* key just past capacity and table is >50% full: stay packed */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED)) {
		if (h < ht->nTableSize) {
			/* first insert with a small index: start in packed mode */
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		if ((flag & HASH_ADD_NEW) == 0) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
	}

	/* hash-mode insert: append bucket and link into its collision chain */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
		/* keep next auto-index past h, saturating at ZEND_LONG_MAX */
		ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
add:
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL; /* NULL key marks an integer-keyed bucket */
	ZVAL_COPY_VALUE(&p->val, pData);

	return &p->val;
}
  867. ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
  868. {
  869. if (flag == HASH_ADD) {
  870. return zend_hash_index_add(ht, h, pData);
  871. } else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
  872. return zend_hash_index_add_new(ht, h, pData);
  873. } else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
  874. ZEND_ASSERT(h == ht->nNextFreeElement);
  875. return zend_hash_next_index_insert(ht, pData);
  876. } else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
  877. ZEND_ASSERT(h == ht->nNextFreeElement);
  878. return zend_hash_next_index_insert_new(ht, pData);
  879. } else {
  880. ZEND_ASSERT(flag == HASH_UPDATE);
  881. return zend_hash_index_update(ht, h, pData);
  882. }
  883. }
/* Insert integer key h => pData; fails (returns NULL) if it exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
/* Insert integer key h without an existence check (caller guarantees the
 * key is new). */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
/* Insert or overwrite integer key h, destroying any previous value. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
/* Append pData at the next free integer index (PHP's $arr[] = ...). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
/* Append pData at the next free integer index, skipping the existence
 * check (caller guarantees the slot is free). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
/* Make room for further inserts when nNumUsed hits nTableSize: either
 * compact (rehash) when enough deleted holes exist to be worth reclaiming,
 * or double the allocation, copy the buckets, and rehash into the larger
 * slot array. Aborts with E_ERROR if the table cannot grow further. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ht->nTableSize = nSize;
		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* only the bucket vector is copied; the slot array is rebuilt below */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
/* Rebuild the hash slot array from the bucket vector. If holes (UNDEF
 * buckets) are found, the live buckets are simultaneously compacted to the
 * front, and the internal pointer plus any active external iterators are
 * re-targeted to the moved positions. Always returns SUCCESS. */
ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		if (HT_FLAGS(ht) & HASH_FLAG_INITIALIZED) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
		}
		return SUCCESS;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* fast path: no compaction needed, just relink every bucket */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* first hole found at i: from here on, copy live buckets
				 * down to j (q) while relinking them */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* same compaction, but external iterators must follow
					 * their element from position i to position j */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
	return SUCCESS;
}
/* Delete the bucket p (vector slot idx, as a HT_IDX_TO_HASH value) whose
 * collision-chain predecessor is `prev` (NULL when p heads its chain, or
 * when the table is packed and has no chains). Unlinks the bucket, fixes
 * the internal pointer and iterators, trims trailing UNDEF buckets,
 * releases the key, and destroys the value. */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		/* unlink from the collision chain */
		if (prev) {
			Z_NEXT(prev->val) = Z_NEXT(p->val);
		} else {
			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
		}
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		/* advance any cursor parked on this element to the next live one */
		uint32_t new_idx;

		new_idx = idx;
		while (1) {
			new_idx++;
			if (new_idx >= ht->nNumUsed) {
				break;
			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
				break;
			}
		}
		if (ht->nInternalPointer == idx) {
			ht->nInternalPointer = new_idx;
		}
		zend_hash_iterators_update(ht, idx, new_idx);
	}
	if (ht->nNumUsed - 1 == idx) {
		/* deleting the last bucket: also shed any trailing holes */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
	}
	if (p->key) {
		zend_string_release(p->key);
	}
	if (ht->pDestructor) {
		/* UNDEF the slot before destructing, so a reentrant destructor
		 * cannot see (or re-destroy) the dying value */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
/* Delete bucket p at slot idx when the chain predecessor is unknown:
 * walk p's collision chain to find it, then delegate to
 * _zend_hash_del_el_ex. */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;

	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		uint32_t nIndex = p->h | ht->nTableMask;
		uint32_t i = HT_HASH(ht, nIndex);

		if (i != idx) {
			/* p is not the chain head: scan until the link before it */
			prev = HT_HASH_TO_BUCKET(ht, i);
			while (Z_NEXT(prev->val) != idx) {
				i = Z_NEXT(prev->val);
				prev = HT_HASH_TO_BUCKET(ht, i);
			}
	 	}
	}

	_zend_hash_del_el_ex(ht, idx, p, prev);
}
/* Delete a bucket given directly by pointer; p must point into ht->arData. */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
/* Delete the entry with the given zend_string key. Tracks the chain
 * predecessor while searching so the unlink is O(1) once found.
 * Returns SUCCESS if an entry was removed, FAILURE otherwise. */
ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* pointer equality catches interned keys; otherwise compare content */
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Delete by zend_string key with IS_INDIRECT awareness: for an indirect
 * entry the bucket is kept and only the referenced zval is destroyed and
 * set to UNDEF (marking the table HASH_FLAG_HAS_EMPTY_IND); an already
 * UNDEF indirect target counts as absent. Direct entries are removed
 * normally. Returns SUCCESS/FAILURE. */
ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* UNDEF the target before destructing to guard
						 * against reentrant destructors */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
  1147. ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
  1148. {
  1149. zend_ulong h;
  1150. uint32_t nIndex;
  1151. uint32_t idx;
  1152. Bucket *p;
  1153. Bucket *prev = NULL;
  1154. IS_CONSISTENT(ht);
  1155. HT_ASSERT_RC1(ht);
  1156. h = zend_inline_hash_func(str, len);
  1157. nIndex = h | ht->nTableMask;
  1158. idx = HT_HASH(ht, nIndex);
  1159. while (idx != HT_INVALID_IDX) {
  1160. p = HT_HASH_TO_BUCKET(ht, idx);
  1161. if ((p->h == h)
  1162. && p->key
  1163. && (ZSTR_LEN(p->key) == len)
  1164. && !memcmp(ZSTR_VAL(p->key), str, len)) {
  1165. if (Z_TYPE(p->val) == IS_INDIRECT) {
  1166. zval *data = Z_INDIRECT(p->val);
  1167. if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
  1168. return FAILURE;
  1169. } else {
  1170. if (ht->pDestructor) {
  1171. ht->pDestructor(data);
  1172. }
  1173. ZVAL_UNDEF(data);
  1174. HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
  1175. }
  1176. } else {
  1177. _zend_hash_del_el_ex(ht, idx, p, prev);
  1178. }
  1179. return SUCCESS;
  1180. }
  1181. prev = p;
  1182. idx = Z_NEXT(p->val);
  1183. }
  1184. return FAILURE;
  1185. }
/* Delete the entry with the given raw-string key (no IS_INDIRECT
 * handling). Returns SUCCESS if removed, FAILURE if not found. */
ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_inline_hash_func(str, len);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h)
		 && p->key
		 && (ZSTR_LEN(p->key) == len)
		 && !memcmp(ZSTR_VAL(p->key), str, len)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Delete the entry with integer key h. Packed tables index directly into
 * the bucket vector (no chain, prev == NULL); hash tables walk the
 * collision chain. Returns SUCCESS/FAILURE. */
ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		/* NULL key marks an integer-keyed bucket */
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Destroy all elements (run pDestructor, release string keys) and free
 * the table's data block. The HashTable struct itself is NOT freed. The
 * four specialized loops avoid per-element checks that are statically
 * known to be unnecessary (static-keys-only, no-holes). */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			SET_INCONSISTENT(HT_IS_DESTROYING);

			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}

			SET_INCONSISTENT(HT_DESTROYED);
		} else {
			/* no destructor: only string keys need releasing */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		}
		zend_hash_iterators_remove(ht);
	} else if (EXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
		return; /* never allocated: nothing to free */
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
/* Destroy a PHP array (HashTable whose values are zvals destroyed with
 * ZVAL_PTR_DTOR) and free the HashTable struct itself. Detaches the table
 * from the GC buffer first so value destructors cannot re-enter it via
 * cycle collection; falls back to generic zend_hash_destroy when a custom
 * destructor was installed. */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = IS_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		p = ht->arData;
		end = p + ht->nNumUsed;
		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
				if (EXPECTED(p->key)) {
					zend_string_release_ex(p->key, 0);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
					if (EXPECTED(p->key)) {
						zend_string_release_ex(p->key, 0);
					}
				}
			} while (++p != end);
		}
	} else if (EXPECTED(!(HT_FLAGS(ht) & HASH_FLAG_INITIALIZED))) {
		goto free_ht; /* never allocated: only the struct to free */
	}
	zend_hash_iterators_remove(ht);
	SET_INCONSISTENT(HT_DESTROYED);
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	FREE_HASHTABLE(ht);
}
/* Remove all elements (destroy values, release string keys) but keep the
 * table allocated and usable at its current size. Resets counters and, in
 * hash mode, the slot array. */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		} else {
			/* no destructor: only string keys need releasing */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			}
		}
		/* packed tables have no slot array to reset */
		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = 0;
	ht->nInternalPointer = 0;
}
/* Symbol-table variant of zend_hash_clean: values are always destroyed
 * with i_zval_ptr_dtor (ignoring pDestructor) and the slot array is reset
 * unconditionally (symbol tables are never packed). Counters are reset;
 * the allocation is kept. */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = 0;
	ht->nInternalPointer = 0;
}
/* Destroy the table by deleting elements one at a time in insertion
 * order via _zend_hash_del_el — slower than zend_hash_destroy, but keeps
 * the table consistent after every step, so destructors may safely
 * inspect it. Frees the data block; the struct itself is kept. */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	p = ht->arData;
	for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
	}
	if (HT_FLAGS(ht) & HASH_FLAG_INITIALIZED) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
/* Like zend_hash_graceful_destroy, but deletes elements in reverse
 * insertion order (last inserted destroyed first). */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	p = ht->arData + ht->nNumUsed;
	while (idx > 0) {
		idx--;
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
	}

	if (HT_FLAGS(ht) & HASH_FLAG_INITIALIZED) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
/* This is used to iterate over a hashtable and selectively delete certain
 * entries. apply_func() receives the data and decides if the entry should
 * be deleted or iteration should be stopped. The following three return
 * codes are possible:
 * ZEND_HASH_APPLY_KEEP - continue
 * ZEND_HASH_APPLY_STOP - stop iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
 */
/* Call apply_func on every live element in order; ZEND_HASH_APPLY_REMOVE
 * in its result deletes the element, ZEND_HASH_APPLY_STOP ends the loop
 * (the two bits may be combined). */
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	Bucket *p;
	int result;

	IS_CONSISTENT(ht);

	for (idx = 0; idx < ht->nNumUsed; idx++) {
		p = ht->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		result = apply_func(&p->val);

		if (result & ZEND_HASH_APPLY_REMOVE) {
			HT_ASSERT_RC1(ht); /* removal mutates: table must not be shared */
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
		if (result & ZEND_HASH_APPLY_STOP) {
			break;
		}
	}
}
/* Like zend_hash_apply, but forwards an extra opaque argument to
 * apply_func. Same ZEND_HASH_APPLY_* result semantics. */
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	uint32_t idx;
	Bucket *p;
	int result;

	IS_CONSISTENT(ht);

	for (idx = 0; idx < ht->nNumUsed; idx++) {
		p = ht->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		result = apply_func(&p->val, argument);

		if (result & ZEND_HASH_APPLY_REMOVE) {
			HT_ASSERT_RC1(ht); /* removal mutates: table must not be shared */
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
		if (result & ZEND_HASH_APPLY_STOP) {
			break;
		}
	}
}
/* Like zend_hash_apply, but passes a va_list of num_args extra arguments
 * plus the element's key to apply_func. va_start/va_end are re-done per
 * element because each callback invocation consumes the list. */
ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
	uint32_t idx;
	Bucket *p;
	va_list args;
	zend_hash_key hash_key;
	int result;

	IS_CONSISTENT(ht);

	for (idx = 0; idx < ht->nNumUsed; idx++) {
		p = ht->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		va_start(args, num_args);
		hash_key.h = p->h;
		hash_key.key = p->key;

		result = apply_func(&p->val, num_args, args, &hash_key);

		if (result & ZEND_HASH_APPLY_REMOVE) {
			HT_ASSERT_RC1(ht); /* removal mutates: table must not be shared */
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
		if (result & ZEND_HASH_APPLY_STOP) {
			va_end(args);
			break;
		}
		va_end(args);
	}
}
  1556. ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
  1557. {
  1558. uint32_t idx;
  1559. Bucket *p;
  1560. int result;
  1561. IS_CONSISTENT(ht);
  1562. idx = ht->nNumUsed;
  1563. while (idx > 0) {
  1564. idx--;
  1565. p = ht->arData + idx;
  1566. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1567. result = apply_func(&p->val);
  1568. if (result & ZEND_HASH_APPLY_REMOVE) {
  1569. HT_ASSERT_RC1(ht);
  1570. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1571. }
  1572. if (result & ZEND_HASH_APPLY_STOP) {
  1573. break;
  1574. }
  1575. }
  1576. }
  1577. ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
  1578. {
  1579. uint32_t idx;
  1580. Bucket *p;
  1581. zval *new_entry, *data;
  1582. IS_CONSISTENT(source);
  1583. IS_CONSISTENT(target);
  1584. HT_ASSERT_RC1(target);
  1585. for (idx = 0; idx < source->nNumUsed; idx++) {
  1586. p = source->arData + idx;
  1587. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1588. /* INDIRECT element may point to UNDEF-ined slots */
  1589. data = &p->val;
  1590. if (Z_TYPE_P(data) == IS_INDIRECT) {
  1591. data = Z_INDIRECT_P(data);
  1592. if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
  1593. continue;
  1594. }
  1595. }
  1596. if (p->key) {
  1597. new_entry = zend_hash_update(target, p->key, data);
  1598. } else {
  1599. new_entry = zend_hash_index_update(target, p->h, data);
  1600. }
  1601. if (pCopyConstructor) {
  1602. pCopyConstructor(new_entry);
  1603. }
  1604. }
  1605. }
/* Copy one bucket "p" of "source" into the pre-allocated bucket "q" of
 * "target" during array duplication.
 *
 * packed / static_keys / with_holes are compile-time constants at every
 * call site (the function is always_inline), so the branches fold away
 * and each caller gets a specialized copier.
 *
 * Returns 1 when the element was copied, 0 when it was skipped (an UNDEF
 * hole, possibly reached through an INDIRECT slot). */
static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
{
	zval *data = &p->val;

	if (with_holes) {
		if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
		}
		if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
			return 0;
		}
	} else if (!packed) {
		/* INDIRECT element may point to UNDEF-ined slots */
		if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
			data = Z_INDIRECT_P(data);
			if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
				return 0;
			}
		}
	}

	do {
		if (Z_OPT_REFCOUNTED_P(data)) {
			/* A reference held exactly once can be "unwrapped" and its
			 * value copied directly — unless it refers back to the very
			 * array being duplicated, which must stay shared. */
			if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
				(Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
				  Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
				data = Z_REFVAL_P(data);
				if (!Z_OPT_REFCOUNTED_P(data)) {
					break;
				}
			}
			Z_ADDREF_P(data);
		}
	} while (0);
	ZVAL_COPY_VALUE(&q->val, data);

	q->h = p->h;
	if (packed) {
		q->key = NULL;
	} else {
		uint32_t nIndex;

		q->key = p->key;
		if (!static_keys && q->key) {
			zend_string_addref(q->key);
		}
		/* Link the new bucket into target's collision chain; idx is the
		 * bucket's position in target (may differ from its position in
		 * source when holes are being compacted). */
		nIndex = q->h | target->nTableMask;
		Z_NEXT(q->val) = HT_HASH(target, nIndex);
		HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
	}
	return 1;
}
/* Duplicate all buckets of a packed array, slot for slot; holes are
 * preserved as UNDEF entries. The idx argument of zend_array_dup_element()
 * is irrelevant for packed tables (no collision chains), so 0 is passed.
 * Caller guarantees source->nNumUsed > 0 — the do/while body runs at
 * least once. */
static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
{
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
			if (with_holes) {
				/* Keep the hole: mark the target slot UNDEF as well. */
				ZVAL_UNDEF(&q->val);
			}
		}
		p++; q++;
	} while (p != end);
}
/* Duplicate all buckets of a non-packed array, compacting holes away.
 * The fast path assumes source and target positions stay in lock-step;
 * once the first hole is hit, the slow path keeps separate source (idx)
 * and target (target_idx) cursors, and re-targets nInternalPointer when
 * the element it referred to lands at a new position.
 * Caller guarantees source->nNumUsed > 0.
 * Returns the number of buckets written to target. */
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
{
	uint32_t idx = 0;
	Bucket *p = source->arData;
	Bucket *q = target->arData;
	Bucket *end = p + source->nNumUsed;

	do {
		if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
			/* First hole found: continue with independent cursors. */
			uint32_t target_idx = idx;

			idx++; p++;
			while (p != end) {
				if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
					if (source->nInternalPointer == idx) {
						target->nInternalPointer = target_idx;
					}
					target_idx++; q++;
				}
				idx++; p++;
			}
			return target_idx;
		}
		idx++; p++; q++;
	} while (p != end);
	return idx;
}
/* Create an independent duplicate of "source" with refcount 1.
 *
 * Four strategies, cheapest applicable one wins:
 *  - empty source: allocate no bucket storage, share the global
 *    uninitialized-bucket sentinel;
 *  - immutable source: a flat memcpy of the used data area;
 *  - packed source: slot-wise copy specialized for packed layout;
 *  - general hash: element-wise copy that rebuilds collision chains and
 *    compacts holes, so the counts are recomputed from the copy loop. */
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
	uint32_t idx;
	HashTable *target;

	IS_CONSISTENT(source);

	ALLOC_HASHTABLE(target);
	GC_SET_REFCOUNT(target, 1);
	GC_TYPE_INFO(target) = IS_ARRAY | (GC_COLLECTABLE << GC_FLAGS_SHIFT);

	target->nTableSize = source->nTableSize;
	target->pDestructor = ZVAL_PTR_DTOR;

	if (source->nNumOfElements == 0) {
		/* Empty: produce an uninitialized, unpacked table. */
		uint32_t mask = HASH_FLAG_MASK & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED);
		HT_FLAGS(target) = (HT_FLAGS(source) & mask) | HASH_FLAG_STATIC_KEYS;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = 0;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* Immutable source: contents can be copied verbatim without
		 * per-element refcounting. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		target->nInternalPointer = source->nInternalPointer;
		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
	} else if (HT_FLAGS(source) & HASH_FLAG_PACKED) {
		/* Packed: copy buckets only; the hash part stays the minimal
		 * HT_MIN_MASK stub. An out-of-range internal pointer resets to 0. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE_EX(target->nTableSize, HT_MIN_MASK)));
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;
		HT_HASH_RESET_PACKED(target);

		if (HT_IS_WITHOUT_HOLES(target)) {
			zend_array_dup_packed_elements(source, target, 0);
		} else {
			zend_array_dup_packed_elements(source, target, 1);
		}
	} else {
		/* General hash: pick the copier specialization matching the
		 * source's key-interning and hole situation. */
		HT_FLAGS(target) = HT_FLAGS(source) & HASH_FLAG_MASK;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer =
			(source->nInternalPointer < source->nNumUsed) ?
				source->nInternalPointer : 0;
		HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		HT_HASH_RESET(target);

		if (HT_HAS_STATIC_KEYS_ONLY(target)) {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 1, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 1, 1);
			}
		} else {
			if (HT_IS_WITHOUT_HOLES(source)) {
				idx = zend_array_dup_elements(source, target, 0, 0);
			} else {
				idx = zend_array_dup_elements(source, target, 0, 1);
			}
		}
		/* Holes were compacted away, so both counts equal the number of
		 * buckets actually written. */
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
	}
	return target;
}
/* Merge every live element of "source" into "target".
 *
 * overwrite != 0: existing keys in target are replaced (HASH_UPDATE).
 * overwrite == 0: existing keys win; the add is refused, t is NULL and
 * the copy-constructor is skipped for that element (HASH_ADD).
 * INDIRECT source slots are dereferenced and UNDEF values skipped.
 * pCopyConstructor (may be NULL) runs on each inserted value. */
ZEND_API void ZEND_FASTCALL zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite)
{
	uint32_t idx;
	Bucket *p;
	zval *t, *s;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	if (overwrite) {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_UPDATE | HASH_UPDATE_INDIRECT);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_update(target, p->h, s);
				if (pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	} else {
		for (idx = 0; idx < source->nNumUsed; idx++) {
			p = source->arData + idx;
			s = &p->val;
			if (UNEXPECTED(Z_TYPE_P(s) == IS_INDIRECT)) {
				s = Z_INDIRECT_P(s);
			}
			if (UNEXPECTED(Z_TYPE_P(s) == IS_UNDEF)) {
				continue;
			}
			if (p->key) {
				t = _zend_hash_add_or_update_i(target, p->key, s, HASH_ADD | HASH_UPDATE_INDIRECT);
				/* t is NULL when the key already existed (add refused). */
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			} else {
				t = zend_hash_index_add(target, p->h, s);
				if (t && pCopyConstructor) {
					pCopyConstructor(t);
				}
			}
		}
	}
}
  1818. static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
  1819. {
  1820. zend_hash_key hash_key;
  1821. hash_key.h = p->h;
  1822. hash_key.key = p->key;
  1823. return merge_checker_func(target, source_data, &hash_key, pParam);
  1824. }
/* Selective merge: copy an element of "source" into "target" only when
 * pMergeSource approves it; accepted elements overwrite existing entries.
 *
 * NOTE(review): zend_hash_update() is called with p->key unconditionally,
 * so this assumes every accepted element has a string key — confirm that
 * callers never pass integer-keyed tables here. */
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
	uint32_t idx;
	Bucket *p;
	zval *t;

	IS_CONSISTENT(source);
	IS_CONSISTENT(target);
	HT_ASSERT_RC1(target);

	for (idx = 0; idx < source->nNumUsed; idx++) {
		p = source->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
			t = zend_hash_update(target, p->key, &p->val);
			if (pCopyConstructor) {
				pCopyConstructor(t);
			}
		}
	}
}
  1844. /* Returns the hash table data if found and NULL if not. */
  1845. ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
  1846. {
  1847. Bucket *p;
  1848. IS_CONSISTENT(ht);
  1849. p = zend_hash_find_bucket(ht, key, 0);
  1850. return p ? &p->val : NULL;
  1851. }
  1852. ZEND_API zval* ZEND_FASTCALL _zend_hash_find_known_hash(const HashTable *ht, zend_string *key)
  1853. {
  1854. Bucket *p;
  1855. IS_CONSISTENT(ht);
  1856. p = zend_hash_find_bucket(ht, key, 1);
  1857. return p ? &p->val : NULL;
  1858. }
  1859. ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
  1860. {
  1861. zend_ulong h;
  1862. Bucket *p;
  1863. IS_CONSISTENT(ht);
  1864. h = zend_inline_hash_func(str, len);
  1865. p = zend_hash_str_find_bucket(ht, str, len, h);
  1866. return p ? &p->val : NULL;
  1867. }
  1868. ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
  1869. {
  1870. Bucket *p;
  1871. IS_CONSISTENT(ht);
  1872. p = zend_hash_find_bucket(ht, key, 0);
  1873. return p ? 1 : 0;
  1874. }
  1875. ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
  1876. {
  1877. zend_ulong h;
  1878. Bucket *p;
  1879. IS_CONSISTENT(ht);
  1880. h = zend_inline_hash_func(str, len);
  1881. p = zend_hash_str_find_bucket(ht, str, len, h);
  1882. return p ? 1 : 0;
  1883. }
  1884. ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
  1885. {
  1886. Bucket *p;
  1887. IS_CONSISTENT(ht);
  1888. if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
  1889. if (h < ht->nNumUsed) {
  1890. p = ht->arData + h;
  1891. if (Z_TYPE(p->val) != IS_UNDEF) {
  1892. return &p->val;
  1893. }
  1894. }
  1895. return NULL;
  1896. }
  1897. p = zend_hash_index_find_bucket(ht, h);
  1898. return p ? &p->val : NULL;
  1899. }
  1900. ZEND_API zval* ZEND_FASTCALL _zend_hash_index_find(const HashTable *ht, zend_ulong h)
  1901. {
  1902. Bucket *p;
  1903. IS_CONSISTENT(ht);
  1904. p = zend_hash_index_find_bucket(ht, h);
  1905. return p ? &p->val : NULL;
  1906. }
  1907. ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
  1908. {
  1909. Bucket *p;
  1910. IS_CONSISTENT(ht);
  1911. if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
  1912. if (h < ht->nNumUsed) {
  1913. if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
  1914. return 1;
  1915. }
  1916. }
  1917. return 0;
  1918. }
  1919. p = zend_hash_index_find_bucket(ht, h);
  1920. return p ? 1 : 0;
  1921. }
  1922. ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
  1923. {
  1924. IS_CONSISTENT(ht);
  1925. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  1926. *pos = _zend_hash_get_valid_pos(ht, 0);
  1927. }
  1928. /* This function will be extremely optimized by remembering
  1929. * the end of the list
  1930. */
  1931. ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
  1932. {
  1933. uint32_t idx;
  1934. IS_CONSISTENT(ht);
  1935. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  1936. idx = ht->nNumUsed;
  1937. while (idx > 0) {
  1938. idx--;
  1939. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
  1940. *pos = idx;
  1941. return;
  1942. }
  1943. }
  1944. *pos = ht->nNumUsed;
  1945. }
  1946. ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
  1947. {
  1948. uint32_t idx;
  1949. IS_CONSISTENT(ht);
  1950. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  1951. idx = _zend_hash_get_valid_pos(ht, *pos);
  1952. if (idx < ht->nNumUsed) {
  1953. while (1) {
  1954. idx++;
  1955. if (idx >= ht->nNumUsed) {
  1956. *pos = ht->nNumUsed;
  1957. return SUCCESS;
  1958. }
  1959. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
  1960. *pos = idx;
  1961. return SUCCESS;
  1962. }
  1963. }
  1964. } else {
  1965. return FAILURE;
  1966. }
  1967. }
  1968. ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
  1969. {
  1970. uint32_t idx = *pos;
  1971. IS_CONSISTENT(ht);
  1972. HT_ASSERT(ht, &ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
  1973. if (idx < ht->nNumUsed) {
  1974. while (idx > 0) {
  1975. idx--;
  1976. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
  1977. *pos = idx;
  1978. return SUCCESS;
  1979. }
  1980. }
  1981. *pos = ht->nNumUsed;
  1982. return SUCCESS;
  1983. } else {
  1984. return FAILURE;
  1985. }
  1986. }
  1987. /* This function should be made binary safe */
  1988. ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
  1989. {
  1990. uint32_t idx;
  1991. Bucket *p;
  1992. IS_CONSISTENT(ht);
  1993. idx = _zend_hash_get_valid_pos(ht, *pos);
  1994. if (idx < ht->nNumUsed) {
  1995. p = ht->arData + idx;
  1996. if (p->key) {
  1997. *str_index = p->key;
  1998. return HASH_KEY_IS_STRING;
  1999. } else {
  2000. *num_index = p->h;
  2001. return HASH_KEY_IS_LONG;
  2002. }
  2003. }
  2004. return HASH_KEY_NON_EXISTENT;
  2005. }
  2006. ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
  2007. {
  2008. uint32_t idx;
  2009. Bucket *p;
  2010. IS_CONSISTENT(ht);
  2011. idx = _zend_hash_get_valid_pos(ht, *pos);
  2012. if (idx >= ht->nNumUsed) {
  2013. ZVAL_NULL(key);
  2014. } else {
  2015. p = ht->arData + idx;
  2016. if (p->key) {
  2017. ZVAL_STR_COPY(key, p->key);
  2018. } else {
  2019. ZVAL_LONG(key, p->h);
  2020. }
  2021. }
  2022. }
  2023. ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
  2024. {
  2025. uint32_t idx;
  2026. Bucket *p;
  2027. IS_CONSISTENT(ht);
  2028. idx = _zend_hash_get_valid_pos(ht, *pos);
  2029. if (idx < ht->nNumUsed) {
  2030. p = ht->arData + idx;
  2031. if (p->key) {
  2032. return HASH_KEY_IS_STRING;
  2033. } else {
  2034. return HASH_KEY_IS_LONG;
  2035. }
  2036. }
  2037. return HASH_KEY_NON_EXISTENT;
  2038. }
  2039. ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
  2040. {
  2041. uint32_t idx;
  2042. Bucket *p;
  2043. IS_CONSISTENT(ht);
  2044. idx = _zend_hash_get_valid_pos(ht, *pos);
  2045. if (idx < ht->nNumUsed) {
  2046. p = ht->arData + idx;
  2047. return &p->val;
  2048. } else {
  2049. return NULL;
  2050. }
  2051. }
  2052. ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
  2053. {
  2054. zval val;
  2055. zend_ulong h;
  2056. zend_string *key;
  2057. ZVAL_COPY_VALUE(&val, &p->val);
  2058. h = p->h;
  2059. key = p->key;
  2060. ZVAL_COPY_VALUE(&p->val, &q->val);
  2061. p->h = q->h;
  2062. p->key = q->key;
  2063. ZVAL_COPY_VALUE(&q->val, &val);
  2064. q->h = h;
  2065. q->key = key;
  2066. }
  2067. ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
  2068. {
  2069. zval val;
  2070. ZVAL_COPY_VALUE(&val, &p->val);
  2071. ZVAL_COPY_VALUE(&p->val, &q->val);
  2072. ZVAL_COPY_VALUE(&q->val, &val);
  2073. }
  2074. ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
  2075. {
  2076. zval val;
  2077. zend_ulong h;
  2078. ZVAL_COPY_VALUE(&val, &p->val);
  2079. h = p->h;
  2080. ZVAL_COPY_VALUE(&p->val, &q->val);
  2081. p->h = q->h;
  2082. ZVAL_COPY_VALUE(&q->val, &val);
  2083. q->h = h;
  2084. }
/* Sort the table in place using the supplied sort algorithm and element
 * comparator. With renumber set, existing keys are discarded and elements
 * get sequential numeric keys 0..n-1 (the table ends up packed).
 * Always returns SUCCESS. */
ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
{
	Bucket *p;
	uint32_t i, j;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* 0 or 1 element needs no sorting; renumbering a single element still
	 * has to run to rewrite its key. */
	if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
		return SUCCESS;
	}

	/* Compact holes so arData[0..i) are all live before sorting. */
	if (HT_IS_WITHOUT_HOLES(ht)) {
		i = ht->nNumUsed;
	} else {
		for (j = 0, i = 0; j < ht->nNumUsed; j++) {
			p = ht->arData + j;
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
			if (i != j) {
				ht->arData[i] = *p;
			}
			i++;
		}
	}

	/* Pick the cheapest swap routine the layout/mode allows. */
	sort((void *)ht->arData, i, sizeof(Bucket), compar,
			(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
				((HT_FLAGS(ht) & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));

	ht->nNumUsed = i;
	ht->nInternalPointer = 0;

	if (renumber) {
		/* Drop old keys and assign sequential numeric ones. */
		for (j = 0; j < i; j++) {
			p = ht->arData + j;
			p->h = j;
			if (p->key) {
				zend_string_release(p->key);
				p->key = NULL;
			}
		}

		ht->nNextFreeElement = i;
	}
	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (!renumber) {
			/* Order changed but keys were kept: the packed layout is no
			 * longer valid, convert to a real hash. */
			zend_hash_packed_to_hash(ht);
		}
	} else {
		if (renumber) {
			/* Keys are now 0..i-1: shrink to the packed representation. */
			void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
			Bucket *old_buckets = ht->arData;

			new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
			HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
			ht->nTableMask = HT_MIN_MASK;
			HT_SET_DATA_ADDR(ht, new_data);
			memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
			pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
			HT_HASH_RESET_PACKED(ht);
		} else {
			/* Bucket order changed: rebuild the collision chains. */
			zend_hash_rehash(ht);
		}
	}
	return SUCCESS;
}
/* Compare two hash tables; result follows the memcmp convention.
 *
 * Tables first order by element count. With "ordered" set, elements must
 * then agree pairwise in iteration order: numeric keys compare by value,
 * string keys by length then bytes, and a string key sorts after a
 * numeric one. Without "ordered", each element of ht1 is looked up in ht2
 * by its key (a miss makes ht1 "larger"). Values are compared with
 * "compar" after dereferencing INDIRECT slots; an UNDEF value sorts
 * before any defined one. */
static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
	uint32_t idx1, idx2;

	if (ht1->nNumOfElements != ht2->nNumOfElements) {
		return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
	}

	for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
		Bucket *p1 = ht1->arData + idx1, *p2;
		zval *pData1, *pData2;
		int result;

		if (Z_TYPE(p1->val) == IS_UNDEF) continue;
		if (ordered) {
			/* Advance idx2 to ht2's next live bucket; equal element
			 * counts guarantee one exists (asserted). */
			while (1) {
				ZEND_ASSERT(idx2 != ht2->nNumUsed);
				p2 = ht2->arData + idx2;
				if (Z_TYPE(p2->val) != IS_UNDEF) break;
				idx2++;
			}
			if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
				if (p1->h != p2->h) {
					return p1->h > p2->h ? 1 : -1;
				}
			} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
				if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
					return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
				}

				result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
				if (result != 0) {
					return result;
				}
			} else {
				/* Mixed key types: A string key is considered as larger */
				return p1->key != NULL ? 1 : -1;
			}
			pData2 = &p2->val;
			idx2++;
		} else {
			if (p1->key == NULL) { /* numeric index */
				pData2 = zend_hash_index_find(ht2, p1->h);
				if (pData2 == NULL) {
					return 1;
				}
			} else { /* string index */
				pData2 = zend_hash_find(ht2, p1->key);
				if (pData2 == NULL) {
					return 1;
				}
			}
		}

		pData1 = &p1->val;
		if (Z_TYPE_P(pData1) == IS_INDIRECT) {
			pData1 = Z_INDIRECT_P(pData1);
		}
		if (Z_TYPE_P(pData2) == IS_INDIRECT) {
			pData2 = Z_INDIRECT_P(pData2);
		}

		if (Z_TYPE_P(pData1) == IS_UNDEF) {
			if (Z_TYPE_P(pData2) != IS_UNDEF) {
				return -1;
			}
		} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
			return 1;
		} else {
			result = compar(pData1, pData2);
			if (result != 0) {
				return result;
			}
		}
	}

	return 0;
}
/* Public entry point for table comparison; see zend_hash_compare_impl()
 * for the ordering rules. Identical pointers compare equal without
 * iterating. */
ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
{
	int result;

	IS_CONSISTENT(ht1);
	IS_CONSISTENT(ht2);

	if (ht1 == ht2) {
		return 0;
	}

	/* It's enough to protect only one of the arrays.
	 * The second one may be referenced from the first and this may cause
	 * false recursion detection.
	 */
	if (UNEXPECTED(GC_IS_RECURSIVE(ht1))) {
		zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");
	}

	/* NOTE(review): immutable tables skip the recursion marking,
	 * presumably because their GC info must never be modified — confirm
	 * against the GC flag definitions. */
	if (!(GC_FLAGS(ht1) & GC_IMMUTABLE)) {
		GC_PROTECT_RECURSION(ht1);
	}
	result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
	if (!(GC_FLAGS(ht1) & GC_IMMUTABLE)) {
		GC_UNPROTECT_RECURSION(ht1);
	}

	return result;
}
  2237. ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
  2238. {
  2239. uint32_t idx;
  2240. Bucket *p, *res;
  2241. IS_CONSISTENT(ht);
  2242. if (ht->nNumOfElements == 0 ) {
  2243. return NULL;
  2244. }
  2245. idx = 0;
  2246. while (1) {
  2247. if (idx == ht->nNumUsed) {
  2248. return NULL;
  2249. }
  2250. if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
  2251. idx++;
  2252. }
  2253. res = ht->arData + idx;
  2254. for (; idx < ht->nNumUsed; idx++) {
  2255. p = ht->arData + idx;
  2256. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  2257. if (flag) {
  2258. if (compar(res, p) < 0) { /* max */
  2259. res = p;
  2260. }
  2261. } else {
  2262. if (compar(res, p) > 0) { /* min */
  2263. res = p;
  2264. }
  2265. }
  2266. }
  2267. return &res->val;
  2268. }
/* Decide whether key[0..length) is the canonical decimal spelling of an
 * integer that fits a zend_long; on success store the value in *idx and
 * return 1, otherwise return 0.
 *
 * "Canonical" rejects leading zeros ("01", "-0"), digit strings longer
 * than a zend_long can hold, and out-of-range values — so only keys with
 * a unique integer spelling are treated as numeric. */
ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
	register const char *tmp = key;

	const char *end = key + length;

	if (*tmp == '-') {
		tmp++;
	}

	if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
	 || (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
	 || (SIZEOF_ZEND_LONG == 4 &&
			end - tmp == MAX_LENGTH_OF_LONG - 1 &&
			*tmp > '2')) { /* overflow */
		return 0;
	}
	*idx = (*tmp - '0');
	while (1) {
		++tmp;
		if (tmp == end) {
			if (*key == '-') {
				/* The magnitude of a negative may be up to ZEND_LONG_MAX+1
				 * (== -ZEND_LONG_MIN); the unsigned "*idx - 1" wrap makes
				 * exactly that range pass the check. */
				if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
					return 0;
				}
				/* Unsigned negation yields the two's-complement pattern of
				 * the negative value, including ZEND_LONG_MIN. */
				*idx = 0 - *idx;
			} else if (*idx > ZEND_LONG_MAX) { /* overflow */
				return 0;
			}
			return 1;
		}
		if (*tmp <= '9' && *tmp >= '0') {
			*idx = (*idx * 10) + (*tmp - '0');
		} else {
			return 0; /* non-digit character: not numeric */
		}
	}
}
/* Takes a "symtable" hashtable (contains integer and non-numeric string keys)
 * and converts it to a "proptable" (contains only string keys).
 * If the symtable didn't need duplicating, its refcount is incremented.
 */
ZEND_API HashTable* ZEND_FASTCALL zend_symtable_to_proptable(HashTable *ht)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	/* Packed tables have only integer keys, so conversion is mandatory. */
	if (UNEXPECTED(HT_IS_PACKED(ht))) {
		goto convert;
	}

	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
		if (!str_key) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	/* All keys are already strings: share the table. Immutable tables
	 * are not refcounted, so skip the addref for those. */
	if (!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE)) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL(ht, num_key, str_key, zv) {
			if (!str_key) {
				/* Stringify the numeric key. The delref pre-compensates
				 * the reference zend_hash_update() takes, leaving new_ht
				 * as the string's sole owner. */
				str_key = zend_long_to_str(num_key);
				zend_string_delref(str_key);
			}
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* Unwrap references that have a single holder. */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv);
				}
			} while (0);
			zend_hash_update(new_ht, str_key, zv);
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
/* Takes a "proptable" hashtable (contains only string keys) and converts it to
 * a "symtable" (contains integer and non-numeric string keys).
 * If the proptable didn't need duplicating, its refcount is incremented.
 */
ZEND_API HashTable* ZEND_FASTCALL zend_proptable_to_symtable(HashTable *ht, zend_bool always_duplicate)
{
	zend_ulong num_key;
	zend_string *str_key;
	zval *zv;

	ZEND_HASH_FOREACH_STR_KEY(ht, str_key) {
		/* The `str_key &&` here might seem redundant: property tables should
		 * only have string keys. Unfortunately, this isn't true, at the very
		 * least because of ArrayObject, which stores a symtable where the
		 * property table should be.
		 */
		if (str_key && ZEND_HANDLE_NUMERIC(str_key, num_key)) {
			goto convert;
		}
	} ZEND_HASH_FOREACH_END();

	if (always_duplicate) {
		return zend_array_dup(ht);
	}

	/* No numeric-string keys: already a valid symtable, share it.
	 * Immutable tables are not refcounted, so skip the addref there. */
	if (EXPECTED(!(GC_FLAGS(ht) & IS_ARRAY_IMMUTABLE))) {
		GC_ADDREF(ht);
	}

	return ht;

convert:
	{
		HashTable *new_ht = zend_new_array(zend_hash_num_elements(ht));

		ZEND_HASH_FOREACH_KEY_VAL_IND(ht, num_key, str_key, zv) {
			do {
				if (Z_OPT_REFCOUNTED_P(zv)) {
					/* Unwrap references that have a single holder. */
					if (Z_ISREF_P(zv) && Z_REFCOUNT_P(zv) == 1) {
						zv = Z_REFVAL_P(zv);
						if (!Z_OPT_REFCOUNTED_P(zv)) {
							break;
						}
					}
					Z_ADDREF_P(zv);
				}
			} while (0);
			/* Again, thank ArrayObject for `!str_key ||`. */
			if (!str_key || ZEND_HANDLE_NUMERIC(str_key, num_key)) {
				zend_hash_index_update(new_ht, num_key, zv);
			} else {
				zend_hash_update(new_ht, str_key, zv);
			}
		} ZEND_HASH_FOREACH_END();

		return new_ht;
	}
}
  2400. /*
  2401. * Local variables:
  2402. * tab-width: 4
  2403. * c-basic-offset: 4
  2404. * indent-tabs-mode: t
  2405. * End:
  2406. * vim600: sw=4 ts=4 fdm=marker
  2407. * vim<600: sw=4 ts=4
  2408. */