zend_gc.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715
  1. /*
  2. +----------------------------------------------------------------------+
  3. | Zend Engine |
  4. +----------------------------------------------------------------------+
  5. | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) |
  6. +----------------------------------------------------------------------+
  7. | This source file is subject to version 2.00 of the Zend license, |
  8. | that is bundled with this package in the file LICENSE, and is |
  9. | available through the world-wide-web at the following url: |
  10. | http://www.zend.com/license/2_00.txt. |
  11. | If you did not receive a copy of the Zend license and are unable to |
  12. | obtain it through the world-wide-web, please send a note to |
  13. | license@zend.com so we can mail you a copy immediately. |
  14. +----------------------------------------------------------------------+
  15. | Authors: David Wang <planetbeing@gmail.com> |
  16. | Dmitry Stogov <dmitry@php.net> |
  17. +----------------------------------------------------------------------+
  18. */
  19. /**
  20. * zend_gc_collect_cycles
  21. * ======================
  22. *
* Colors and their meanings
  24. * ----------------------
  25. *
  26. * BLACK (GC_BLACK) - In use or free.
  27. * GREY (GC_GREY) - Possible member of cycle.
  28. * WHITE (GC_WHITE) - Member of garbage cycle.
  29. * PURPLE (GC_PURPLE) - Possible root of cycle.
  30. *
  31. * Colors described in the paper but not used
  32. * ------------------------------------------
  33. *
  34. * GREEN - Acyclic
  35. * RED - Candidate cycle undergoing
  36. * ORANGE - Candidate cycle awaiting epoch boundary.
  37. *
  38. *
  39. * Flow
  40. * =====
  41. *
  42. * The garbage collect cycle starts from 'gc_mark_roots', which traverses the
* possible roots, and calls mark_grey, with a depth-first traversal, for the
* roots that are marked purple.
  45. *
  46. * After all possible roots are traversed and marked,
  47. * gc_scan_roots will be called, and each root will be called with
  48. * gc_scan(root->ref)
  49. *
  50. * gc_scan checks the colors of possible members.
  51. *
  52. * If the node is marked as grey and the refcount > 0
* gc_scan_black will be called on that node to scan its subgraph.
  54. * otherwise (refcount == 0), it marks the node white.
  55. *
* A node MAY be added to the possible roots when ZEND_UNSET_VAR happens or
* when zend_assign_to_variable is called, but only when a possible garbage
* node is produced.
  59. * gc_possible_root() will be called to add the nodes to possible roots.
  60. *
  61. *
  62. * For objects, we call their get_gc handler (by default 'zend_std_get_gc') to
  63. * get the object properties to scan.
  64. *
  65. *
  66. * @see http://researcher.watson.ibm.com/researcher/files/us-bacon/Bacon01Concurrent.pdf
  67. */
  68. #include "zend.h"
  69. #include "zend_API.h"
  70. #include "zend_fibers.h"
#ifndef GC_BENCH
# define GC_BENCH 0
#endif

#ifndef ZEND_GC_DEBUG
# define ZEND_GC_DEBUG 0
#endif

/* GC_INFO layout: the per-ref gc info word packs a 20-bit (compressed)
 * root-buffer address and a 2-bit color into the upper bits of
 * GC_TYPE_INFO (shifted by GC_INFO_SHIFT). */
#define GC_ADDRESS  0x0fffffu
#define GC_COLOR    0x300000u

#define GC_BLACK    0x000000u /* must be zero */
#define GC_WHITE    0x100000u
#define GC_GREY     0x200000u
#define GC_PURPLE   0x300000u

/* Debug tracing (compiled out unless ZEND_GC_DEBUG > 1) */
#if ZEND_GC_DEBUG > 1
# define GC_TRACE(format, ...) fprintf(stderr, format "\n", ##__VA_ARGS__);
# define GC_TRACE_REF(ref, format, ...) \
	do { \
		gc_trace_ref((zend_refcounted *) ref); \
		fprintf(stderr, format "\n", ##__VA_ARGS__); \
	} while (0)
# define GC_TRACE_SET_COLOR(ref, color) \
	GC_TRACE_REF(ref, "->%s", gc_color_name(color))
#else
# define GC_TRACE_REF(ref, format, ...)
# define GC_TRACE_SET_COLOR(ref, new_color)
# define GC_TRACE(str)
#endif

/* GC_INFO access */
#define GC_REF_ADDRESS(ref) \
	(((GC_TYPE_INFO(ref)) & (GC_ADDRESS << GC_INFO_SHIFT)) >> GC_INFO_SHIFT)

#define GC_REF_COLOR(ref) \
	(((GC_TYPE_INFO(ref)) & (GC_COLOR << GC_INFO_SHIFT)) >> GC_INFO_SHIFT)

#define GC_REF_CHECK_COLOR(ref, color) \
	((GC_TYPE_INFO(ref) & (GC_COLOR << GC_INFO_SHIFT)) == ((color) << GC_INFO_SHIFT))

/* Replace address+color, preserving the type and flags bits. */
#define GC_REF_SET_INFO(ref, info) do { \
		GC_TYPE_INFO(ref) = \
			(GC_TYPE_INFO(ref) & (GC_TYPE_MASK | GC_FLAGS_MASK)) | \
			((info) << GC_INFO_SHIFT); \
	} while (0)

/* Replace only the color, preserving the stored address. */
#define GC_REF_SET_COLOR(ref, c) do { \
		GC_TRACE_SET_COLOR(ref, c); \
		GC_TYPE_INFO(ref) = \
			(GC_TYPE_INFO(ref) & ~(GC_COLOR << GC_INFO_SHIFT)) | \
			((c) << GC_INFO_SHIFT); \
	} while (0)

/* GC_BLACK is all-zeros, so setting it just clears the color bits. */
#define GC_REF_SET_BLACK(ref) do { \
		GC_TRACE_SET_COLOR(ref, GC_BLACK); \
		GC_TYPE_INFO(ref) &= ~(GC_COLOR << GC_INFO_SHIFT); \
	} while (0)

/* GC_PURPLE is all-ones, so setting it just sets the color bits. */
#define GC_REF_SET_PURPLE(ref) do { \
		GC_TRACE_SET_COLOR(ref, GC_PURPLE); \
		GC_TYPE_INFO(ref) |= (GC_COLOR << GC_INFO_SHIFT); \
	} while (0)
/* bit stealing tags for gc_root_buffer.ref: the two low bits of the
 * (aligned) pointer encode what kind of entry the slot holds. */
#define GC_BITS         0x3

#define GC_ROOT         0x0 /* possible root of circular garbage */
#define GC_UNUSED       0x1 /* part of linked list of unused buffers */
#define GC_GARBAGE      0x2 /* garbage to delete */
#define GC_DTOR_GARBAGE 0x3 /* garbage on which only the dtor should be invoked */

#define GC_GET_PTR(ptr) \
	((void*)(((uintptr_t)(ptr)) & ~GC_BITS))

#define GC_IS_ROOT(ptr) \
	((((uintptr_t)(ptr)) & GC_BITS) == GC_ROOT)
#define GC_IS_UNUSED(ptr) \
	((((uintptr_t)(ptr)) & GC_BITS) == GC_UNUSED)
#define GC_IS_GARBAGE(ptr) \
	((((uintptr_t)(ptr)) & GC_BITS) == GC_GARBAGE)
#define GC_IS_DTOR_GARBAGE(ptr) \
	((((uintptr_t)(ptr)) & GC_BITS) == GC_DTOR_GARBAGE)

#define GC_MAKE_GARBAGE(ptr) \
	((void*)(((uintptr_t)(ptr)) | GC_GARBAGE))
#define GC_MAKE_DTOR_GARBAGE(ptr) \
	((void*)(((uintptr_t)(ptr)) | GC_DTOR_GARBAGE))

/* GC address conversion: root-buffer slots are addressed by index. */
#define GC_IDX2PTR(idx)   (GC_G(buf) + (idx))
#define GC_PTR2IDX(ptr)   ((ptr) - GC_G(buf))

/* Free-list links are stored in .ref as (idx * sizeof(void*)) so the
 * GC_UNUSED tag fits in the low bits. */
#define GC_IDX2LIST(idx)  ((void*)(uintptr_t)(((idx) * sizeof(void*)) | GC_UNUSED))
#define GC_LIST2IDX(list) (((uint32_t)(uintptr_t)(list)) / sizeof(void*))

/* GC buffers: index 0 is reserved as the "no address" sentinel. */
#define GC_INVALID           0
#define GC_FIRST_ROOT        1

#define GC_DEFAULT_BUF_SIZE  (16 * 1024)
#define GC_BUF_GROW_STEP     (128 * 1024)
#define GC_MAX_UNCOMPRESSED  (512 * 1024) /* addresses >= this are compressed */
#define GC_MAX_BUF_SIZE      0x40000000

#define GC_THRESHOLD_DEFAULT (10000 + GC_FIRST_ROOT)
#define GC_THRESHOLD_STEP    10000
#define GC_THRESHOLD_MAX     1000000000
#define GC_THRESHOLD_TRIGGER 100

/* GC flags */
#define GC_HAS_DESTRUCTORS  (1<<0)

/* unused buffers: recycled-slot free list, then fresh slots in order */
#define GC_HAS_UNUSED() \
	(GC_G(unused) != GC_INVALID)
#define GC_FETCH_UNUSED() \
	gc_fetch_unused()
#define GC_LINK_UNUSED(root) \
	gc_link_unused(root)

#define GC_HAS_NEXT_UNUSED_UNDER_THRESHOLD() \
	(GC_G(first_unused) < GC_G(gc_threshold))
#define GC_HAS_NEXT_UNUSED() \
	(GC_G(first_unused) != GC_G(buf_size))
#define GC_FETCH_NEXT_UNUSED() \
	gc_fetch_next_unused()
ZEND_API int (*gc_collect_cycles)(void);

/* One root-buffer slot: a tagged pointer (see GC_BITS above). */
typedef struct _gc_root_buffer {
	zend_refcounted *ref;
} gc_root_buffer;

/* Per-thread (ZTS) or process-wide collector state. */
typedef struct _zend_gc_globals {
	gc_root_buffer *buf;    /* preallocated arrays of buffers */
	bool gc_enabled;
	bool gc_active;         /* GC currently running, forbid nested GC */
	bool gc_protected;      /* GC protected, forbid root additions */
	bool gc_full;           /* buffer hit GC_MAX_BUF_SIZE; GC disabled */
	uint32_t unused;        /* linked list of unused buffers */
	uint32_t first_unused;  /* first unused buffer */
	uint32_t gc_threshold;  /* GC collection threshold */
	uint32_t buf_size;      /* size of the GC buffer */
	uint32_t num_roots;     /* number of roots in GC buffer */
	uint32_t gc_runs;       /* stats: number of collections run */
	uint32_t collected;     /* stats: number of cycles collected */
#if GC_BENCH
	uint32_t root_buf_length;
	uint32_t root_buf_peak;
	uint32_t zval_possible_root;
	uint32_t zval_buffered;
	uint32_t zval_remove_from_buffer;
	uint32_t zval_marked_grey;
#endif
} zend_gc_globals;

#ifdef ZTS
static int gc_globals_id;
static size_t gc_globals_offset;
#define GC_G(v) ZEND_TSRMG_FAST(gc_globals_offset, zend_gc_globals *, v)
#else
#define GC_G(v) (gc_globals.v)
static zend_gc_globals gc_globals;
#endif

/* Benchmark counters (no-ops unless GC_BENCH is set). */
#if GC_BENCH
# define GC_BENCH_INC(counter) GC_G(counter)++
# define GC_BENCH_DEC(counter) GC_G(counter)--
# define GC_BENCH_PEAK(peak, counter) do { \
		if (GC_G(counter) > GC_G(peak)) { \
			GC_G(peak) = GC_G(counter); \
		} \
	} while (0)
#else
# define GC_BENCH_INC(counter)
# define GC_BENCH_DEC(counter)
# define GC_BENCH_PEAK(peak, counter)
#endif

/* Explicit mark stack used instead of recursion: a doubly-linked list
 * of segments, each sized so one segment fits a 4 KiB allocation. */
#define GC_STACK_SEGMENT_SIZE (((4096 - ZEND_MM_OVERHEAD) / sizeof(void*)) - 2)

typedef struct _gc_stack gc_stack;

struct _gc_stack {
	gc_stack *prev;
	gc_stack *next;
	zend_refcounted *data[GC_STACK_SEGMENT_SIZE];
};

/* Declare the locals the PUSH/POP macros operate on. */
#define GC_STACK_DCL(init) \
	gc_stack *_stack = init; \
	size_t    _top = 0;

#define GC_STACK_PUSH(ref) \
	gc_stack_push(&_stack, &_top, ref);

#define GC_STACK_POP() \
	gc_stack_pop(&_stack, &_top)
  237. static zend_never_inline gc_stack* gc_stack_next(gc_stack *stack)
  238. {
  239. if (UNEXPECTED(!stack->next)) {
  240. gc_stack *segment = emalloc(sizeof(gc_stack));
  241. segment->prev = stack;
  242. segment->next = NULL;
  243. stack->next = segment;
  244. }
  245. return stack->next;
  246. }
  247. static zend_always_inline void gc_stack_push(gc_stack **stack, size_t *top, zend_refcounted *ref)
  248. {
  249. if (UNEXPECTED(*top == GC_STACK_SEGMENT_SIZE)) {
  250. (*stack) = gc_stack_next(*stack);
  251. (*top) = 0;
  252. }
  253. (*stack)->data[(*top)++] = ref;
  254. }
  255. static zend_always_inline zend_refcounted* gc_stack_pop(gc_stack **stack, size_t *top)
  256. {
  257. if (UNEXPECTED((*top) == 0)) {
  258. if (!(*stack)->prev) {
  259. return NULL;
  260. } else {
  261. (*stack) = (*stack)->prev;
  262. (*top) = GC_STACK_SEGMENT_SIZE - 1;
  263. return (*stack)->data[GC_STACK_SEGMENT_SIZE - 1];
  264. }
  265. } else {
  266. return (*stack)->data[--(*top)];
  267. }
  268. }
  269. static void gc_stack_free(gc_stack *stack)
  270. {
  271. gc_stack *p = stack->next;
  272. while (p) {
  273. stack = p->next;
  274. efree(p);
  275. p = stack;
  276. }
  277. }
  278. static zend_always_inline uint32_t gc_compress(uint32_t idx)
  279. {
  280. if (EXPECTED(idx < GC_MAX_UNCOMPRESSED)) {
  281. return idx;
  282. }
  283. return (idx % GC_MAX_UNCOMPRESSED) | GC_MAX_UNCOMPRESSED;
  284. }
/* Map a compressed root-buffer address back to the actual slot.
 * gc_compress() folds indexes >= GC_MAX_UNCOMPRESSED, so several slots
 * can share one compressed value; probe each candidate (stepping by
 * GC_MAX_UNCOMPRESSED) until the slot holding "ref" is found.  The
 * caller guarantees "ref" is buffered, so the probe must terminate. */
static zend_always_inline gc_root_buffer* gc_decompress(zend_refcounted *ref, uint32_t idx)
{
	gc_root_buffer *root = GC_IDX2PTR(idx);

	if (EXPECTED(GC_GET_PTR(root->ref) == ref)) {
		return root;
	}
	while (1) {
		idx += GC_MAX_UNCOMPRESSED;
		ZEND_ASSERT(idx < GC_G(first_unused));
		root = GC_IDX2PTR(idx);
		if (GC_GET_PTR(root->ref) == ref) {
			return root;
		}
	}
}
  300. static zend_always_inline uint32_t gc_fetch_unused(void)
  301. {
  302. uint32_t idx;
  303. gc_root_buffer *root;
  304. ZEND_ASSERT(GC_HAS_UNUSED());
  305. idx = GC_G(unused);
  306. root = GC_IDX2PTR(idx);
  307. ZEND_ASSERT(GC_IS_UNUSED(root->ref));
  308. GC_G(unused) = GC_LIST2IDX(root->ref);
  309. return idx;
  310. }
  311. static zend_always_inline void gc_link_unused(gc_root_buffer *root)
  312. {
  313. root->ref = GC_IDX2LIST(GC_G(unused));
  314. GC_G(unused) = GC_PTR2IDX(root);
  315. }
  316. static zend_always_inline uint32_t gc_fetch_next_unused(void)
  317. {
  318. uint32_t idx;
  319. ZEND_ASSERT(GC_HAS_NEXT_UNUSED());
  320. idx = GC_G(first_unused);
  321. GC_G(first_unused) = GC_G(first_unused) + 1;
  322. return idx;
  323. }
#if ZEND_GC_DEBUG > 1
/* Human-readable name of a GC color (debug tracing only). */
static const char *gc_color_name(uint32_t color) {
	switch (color) {
		case GC_BLACK: return "black";
		case GC_WHITE: return "white";
		case GC_GREY: return "grey";
		case GC_PURPLE: return "purple";
		default: return "unknown";
	}
}

/* Dump one refcounted node to stderr: address, refcount, buffer
 * address, color, and a type-specific description (class#handle for
 * objects, element count for arrays, type name otherwise). */
static void gc_trace_ref(zend_refcounted *ref) {
	if (GC_TYPE(ref) == IS_OBJECT) {
		zend_object *obj = (zend_object *) ref;

		fprintf(stderr, "[%p] rc=%d addr=%d %s object(%s)#%d ",
			ref, GC_REFCOUNT(ref), GC_REF_ADDRESS(ref),
			gc_color_name(GC_REF_COLOR(ref)),
			obj->ce->name->val, obj->handle);
	} else if (GC_TYPE(ref) == IS_ARRAY) {
		zend_array *arr = (zend_array *) ref;

		fprintf(stderr, "[%p] rc=%d addr=%d %s array(%d) ",
			ref, GC_REFCOUNT(ref), GC_REF_ADDRESS(ref),
			gc_color_name(GC_REF_COLOR(ref)),
			zend_hash_num_elements(arr));
	} else {
		fprintf(stderr, "[%p] rc=%d addr=%d %s %s ",
			ref, GC_REFCOUNT(ref), GC_REF_ADDRESS(ref),
			gc_color_name(GC_REF_COLOR(ref)),
			GC_TYPE(ref) == IS_REFERENCE
				? "reference" : zend_get_type_by_const(GC_TYPE(ref)));
	}
}
#endif
  356. static zend_always_inline void gc_remove_from_roots(gc_root_buffer *root)
  357. {
  358. GC_LINK_UNUSED(root);
  359. GC_G(num_roots)--;
  360. GC_BENCH_DEC(root_buf_length);
  361. }
  362. static void root_buffer_dtor(zend_gc_globals *gc_globals)
  363. {
  364. if (gc_globals->buf) {
  365. free(gc_globals->buf);
  366. gc_globals->buf = NULL;
  367. }
  368. }
/* Initialize a zend_gc_globals instance to its pristine state: the
 * collector is off and protected, and no buffer is allocated yet
 * (gc_enable() allocates it lazily). */
static void gc_globals_ctor_ex(zend_gc_globals *gc_globals)
{
	/* collector state flags */
	gc_globals->gc_enabled = 0;
	gc_globals->gc_active = 0;
	gc_globals->gc_protected = 1;
	gc_globals->gc_full = 0;
	/* root-buffer bookkeeping (all "invalid" until gc_enable()) */
	gc_globals->buf = NULL;
	gc_globals->unused = GC_INVALID;
	gc_globals->first_unused = GC_INVALID;
	gc_globals->gc_threshold = GC_INVALID;
	gc_globals->buf_size = GC_INVALID;
	gc_globals->num_roots = 0;
	/* statistics */
	gc_globals->gc_runs = 0;
	gc_globals->collected = 0;
#if GC_BENCH
	gc_globals->root_buf_length = 0;
	gc_globals->root_buf_peak = 0;
	gc_globals->zval_possible_root = 0;
	gc_globals->zval_buffered = 0;
	gc_globals->zval_remove_from_buffer = 0;
	gc_globals->zval_marked_grey = 0;
#endif
}
/* Startup constructor: in ZTS builds register the per-thread globals
 * with TSRM (ctor/dtor then run per thread); otherwise initialize the
 * single static instance directly. */
void gc_globals_ctor(void)
{
#ifdef ZTS
	ts_allocate_fast_id(&gc_globals_id, &gc_globals_offset, sizeof(zend_gc_globals), (ts_allocate_ctor) gc_globals_ctor_ex, (ts_allocate_dtor) root_buffer_dtor);
#else
	gc_globals_ctor_ex(&gc_globals);
#endif
}
/* Shutdown destructor; in ZTS builds TSRM invokes root_buffer_dtor for
 * each thread instead, so there is nothing to do here. */
void gc_globals_dtor(void)
{
#ifndef ZTS
	root_buffer_dtor(&gc_globals);
#endif
}
/* Reset all per-request collector state.  A no-op until gc_enable()
 * has allocated the root buffer. */
void gc_reset(void)
{
	if (GC_G(buf)) {
		GC_G(gc_active) = 0;
		GC_G(gc_protected) = 0;
		GC_G(gc_full) = 0;
		GC_G(unused) = GC_INVALID;
		GC_G(first_unused) = GC_FIRST_ROOT; /* slot 0 stays reserved */
		GC_G(num_roots) = 0;
		GC_G(gc_runs) = 0;
		GC_G(collected) = 0;
#if GC_BENCH
		GC_G(root_buf_length) = 0;
		GC_G(root_buf_peak) = 0;
		GC_G(zval_possible_root) = 0;
		GC_G(zval_buffered) = 0;
		GC_G(zval_remove_from_buffer) = 0;
		GC_G(zval_marked_grey) = 0;
#endif
	}
}
/* Enable or disable the cycle collector; returns the previous setting.
 * The root buffer is allocated lazily (persistent allocation) on the
 * first transition to enabled. */
ZEND_API bool gc_enable(bool enable)
{
	bool old_enabled = GC_G(gc_enabled);
	GC_G(gc_enabled) = enable;
	if (enable && !old_enabled && GC_G(buf) == NULL) {
		GC_G(buf) = (gc_root_buffer*) pemalloc(sizeof(gc_root_buffer) * GC_DEFAULT_BUF_SIZE, 1);
		GC_G(buf)[0].ref = NULL; /* slot 0 is the GC_INVALID sentinel */
		GC_G(buf_size) = GC_DEFAULT_BUF_SIZE;
		GC_G(gc_threshold) = GC_THRESHOLD_DEFAULT;
		gc_reset();
	}
	return old_enabled;
}
/* Report whether the cycle collector is currently enabled. */
ZEND_API bool gc_enabled(void)
{
	return GC_G(gc_enabled);
}
  444. ZEND_API bool gc_protect(bool protect)
  445. {
  446. bool old_protected = GC_G(gc_protected);
  447. GC_G(gc_protected) = protect;
  448. return old_protected;
  449. }
/* Report whether root additions are currently forbidden. */
ZEND_API bool gc_protected(void)
{
	return GC_G(gc_protected);
}
/* Grow the root buffer: double it while small, then grow by
 * GC_BUF_GROW_STEP, clamped at GC_MAX_BUF_SIZE.  On the first attempt
 * to grow past the limit, the GC is disabled for the request
 * (gc_full/gc_active/gc_protected all set) instead of growing. */
static void gc_grow_root_buffer(void)
{
	size_t new_size;
	if (GC_G(buf_size) >= GC_MAX_BUF_SIZE) {
		if (!GC_G(gc_full)) {
			zend_error(E_WARNING, "GC buffer overflow (GC disabled)\n");
			GC_G(gc_active) = 1;
			GC_G(gc_protected) = 1;
			GC_G(gc_full) = 1;
			return;
		}
		/* NOTE(review): when gc_full is already set we fall through and
		 * perealloc() to the (unchanged) clamped size — harmless but
		 * wasteful; appears intentional, confirm before changing. */
	}
	if (GC_G(buf_size) < GC_BUF_GROW_STEP) {
		new_size = GC_G(buf_size) * 2;
	} else {
		new_size = GC_G(buf_size) + GC_BUF_GROW_STEP;
	}
	if (new_size > GC_MAX_BUF_SIZE) {
		new_size = GC_MAX_BUF_SIZE;
	}
	GC_G(buf) = perealloc(GC_G(buf), sizeof(gc_root_buffer) * new_size, 1);
	GC_G(buf_size) = new_size;
}
/* Adapt the collection threshold to the observed yield of the last run:
 * few cycles collected => collect less often (raise the threshold one
 * step, growing the buffer if needed so the threshold fits); otherwise
 * decay one step back toward the default.
 * "count" is the number of cycles freed by the last gc_collect_cycles(). */
static void gc_adjust_threshold(int count)
{
	uint32_t new_threshold;
	/* TODO Very simple heuristic for dynamic GC buffer resizing:
	 * If there are "too few" collections, increase the collection threshold
	 * by a fixed step */
	if (count < GC_THRESHOLD_TRIGGER) {
		/* increase */
		if (GC_G(gc_threshold) < GC_THRESHOLD_MAX) {
			new_threshold = GC_G(gc_threshold) + GC_THRESHOLD_STEP;
			if (new_threshold > GC_THRESHOLD_MAX) {
				new_threshold = GC_THRESHOLD_MAX;
			}
			if (new_threshold > GC_G(buf_size)) {
				/* the buffer must be able to hold threshold-many roots */
				gc_grow_root_buffer();
			}
			/* only commit if the buffer could actually accommodate it
			 * (gc_grow_root_buffer() may have refused to grow) */
			if (new_threshold <= GC_G(buf_size)) {
				GC_G(gc_threshold) = new_threshold;
			}
		}
	} else if (GC_G(gc_threshold) > GC_THRESHOLD_DEFAULT) {
		new_threshold = GC_G(gc_threshold) - GC_THRESHOLD_STEP;
		if (new_threshold < GC_THRESHOLD_DEFAULT) {
			new_threshold = GC_THRESHOLD_DEFAULT;
		}
		GC_G(gc_threshold) = new_threshold;
	}
}
  505. static zend_never_inline void ZEND_FASTCALL gc_possible_root_when_full(zend_refcounted *ref)
  506. {
  507. uint32_t idx;
  508. gc_root_buffer *newRoot;
  509. ZEND_ASSERT(GC_TYPE(ref) == IS_ARRAY || GC_TYPE(ref) == IS_OBJECT);
  510. ZEND_ASSERT(GC_INFO(ref) == 0);
  511. if (GC_G(gc_enabled) && !GC_G(gc_active)) {
  512. GC_ADDREF(ref);
  513. gc_adjust_threshold(gc_collect_cycles());
  514. if (UNEXPECTED(GC_DELREF(ref)) == 0) {
  515. rc_dtor_func(ref);
  516. return;
  517. } else if (UNEXPECTED(GC_INFO(ref))) {
  518. return;
  519. }
  520. }
  521. if (GC_HAS_UNUSED()) {
  522. idx = GC_FETCH_UNUSED();
  523. } else if (EXPECTED(GC_HAS_NEXT_UNUSED())) {
  524. idx = GC_FETCH_NEXT_UNUSED();
  525. } else {
  526. gc_grow_root_buffer();
  527. if (UNEXPECTED(!GC_HAS_NEXT_UNUSED())) {
  528. return;
  529. }
  530. idx = GC_FETCH_NEXT_UNUSED();
  531. }
  532. newRoot = GC_IDX2PTR(idx);
  533. newRoot->ref = ref; /* GC_ROOT tag is 0 */
  534. GC_TRACE_SET_COLOR(ref, GC_PURPLE);
  535. idx = gc_compress(idx);
  536. GC_REF_SET_INFO(ref, idx | GC_PURPLE);
  537. GC_G(num_roots)++;
  538. GC_BENCH_INC(zval_buffered);
  539. GC_BENCH_INC(root_buf_length);
  540. GC_BENCH_PEAK(root_buf_peak, root_buf_length);
  541. }
/* Record "ref" (an array or object whose refcount was just decremented
 * but stayed > 0) as a possible root of a reference cycle.  Fast path:
 * take a recycled slot or the next fresh slot while under the
 * collection threshold; otherwise defer to the slow path, which may
 * trigger a full collection. */
ZEND_API void ZEND_FASTCALL gc_possible_root(zend_refcounted *ref)
{
	uint32_t idx;
	gc_root_buffer *newRoot;
	if (UNEXPECTED(GC_G(gc_protected))) {
		/* root additions forbidden (e.g. during collection/shutdown) */
		return;
	}
	GC_BENCH_INC(zval_possible_root);
	if (EXPECTED(GC_HAS_UNUSED())) {
		idx = GC_FETCH_UNUSED();
	} else if (EXPECTED(GC_HAS_NEXT_UNUSED_UNDER_THRESHOLD())) {
		idx = GC_FETCH_NEXT_UNUSED();
	} else {
		gc_possible_root_when_full(ref);
		return;
	}
	ZEND_ASSERT(GC_TYPE(ref) == IS_ARRAY || GC_TYPE(ref) == IS_OBJECT);
	ZEND_ASSERT(GC_INFO(ref) == 0);
	newRoot = GC_IDX2PTR(idx);
	newRoot->ref = ref; /* GC_ROOT tag is 0 */
	GC_TRACE_SET_COLOR(ref, GC_PURPLE);
	/* stash the (compressed) slot address and purple color in the ref */
	idx = gc_compress(idx);
	GC_REF_SET_INFO(ref, idx | GC_PURPLE);
	GC_G(num_roots)++;
	GC_BENCH_INC(zval_buffered);
	GC_BENCH_INC(root_buf_length);
	GC_BENCH_PEAK(root_buf_peak, root_buf_length);
}
  570. static zend_never_inline void ZEND_FASTCALL gc_remove_compressed(zend_refcounted *ref, uint32_t idx)
  571. {
  572. gc_root_buffer *root = gc_decompress(ref, idx);
  573. gc_remove_from_roots(root);
  574. }
/* Detach a buffered "ref" from the root buffer and reset its GC info
 * (called when the ref is known not to be garbage, e.g. its refcount
 * reached zero through normal destruction). */
ZEND_API void ZEND_FASTCALL gc_remove_from_buffer(zend_refcounted *ref)
{
	gc_root_buffer *root;
	uint32_t idx = GC_REF_ADDRESS(ref);
	GC_BENCH_INC(zval_remove_from_buffer);
	if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
		GC_TRACE_SET_COLOR(ref, GC_BLACK);
	}
	GC_REF_SET_INFO(ref, 0);
	/* Perform decompression only in case of large buffers */
	if (UNEXPECTED(GC_G(first_unused) >= GC_MAX_UNCOMPRESSED)) {
		gc_remove_compressed(ref, idx);
		return;
	}
	ZEND_ASSERT(idx); /* a buffered ref always has a non-zero address */
	root = GC_IDX2PTR(idx);
	gc_remove_from_roots(root);
}
/* Scan-black phase (paper: ScanBlack): "ref" was found to be externally
 * referenced, so restore the refcounts that gc_mark_grey() decremented
 * and paint its whole reachable subgraph black (in use).  Recursion is
 * emulated: children are pushed on the explicit gc_stack, except the
 * last refcounted child of each node, which is handled via goto
 * tail_call to avoid a push/pop pair. */
static void gc_scan_black(zend_refcounted *ref, gc_stack *stack)
{
	HashTable *ht = NULL;
	Bucket *p, *end;
	zval *zv;
	GC_STACK_DCL(stack);

tail_call:
	if (GC_TYPE(ref) == IS_OBJECT) {
		zend_object *obj = (zend_object*)ref;
		if (EXPECTED(!(OBJ_FLAGS(ref) & IS_OBJ_FREE_CALLED))) {
			int n;
			zval *zv, *end;
			/* the get_gc handler yields the object's scannable zvals
			 * and, optionally, an extra hash table to traverse */
			ht = obj->handlers->get_gc(obj, &zv, &n);
			if (UNEXPECTED(ht)) {
				GC_ADDREF(ht);
				if (!GC_REF_CHECK_COLOR(ht, GC_BLACK)) {
					GC_REF_SET_BLACK(ht);
				} else {
					ht = NULL; /* already black: don't traverse again */
				}
			}
			if (EXPECTED(!ht)) {
				if (!n) goto next;
				end = zv + n;
				/* trim trailing non-refcounted slots so the last entry
				 * can be continued with directly instead of pushed */
				while (!Z_REFCOUNTED_P(--end)) {
					if (zv == end) goto next;
				}
			} else {
				if (!n) goto handle_ht;
				end = zv + n;
			}
			while (zv != end) {
				if (Z_REFCOUNTED_P(zv)) {
					ref = Z_COUNTED_P(zv);
					GC_ADDREF(ref);
					if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
						GC_REF_SET_BLACK(ref);
						GC_STACK_PUSH(ref);
					}
				}
				zv++;
			}
			if (EXPECTED(!ht)) {
				/* last refcounted child: continue with it directly */
				ref = Z_COUNTED_P(zv);
				GC_ADDREF(ref);
				if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
					GC_REF_SET_BLACK(ref);
					goto tail_call;
				}
				goto next;
			}
		} else {
			goto next;
		}
	} else if (GC_TYPE(ref) == IS_ARRAY) {
		ZEND_ASSERT((zend_array*)ref != &EG(symbol_table));
		ht = (zend_array*)ref;
	} else if (GC_TYPE(ref) == IS_REFERENCE) {
		if (Z_REFCOUNTED(((zend_reference*)ref)->val)) {
			ref = Z_COUNTED(((zend_reference*)ref)->val);
			GC_ADDREF(ref);
			if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
				GC_REF_SET_BLACK(ref);
				goto tail_call;
			}
		}
		goto next;
	} else {
		goto next;
	}

handle_ht:
	if (!ht->nNumUsed) goto next;
	p = ht->arData;
	end = p + ht->nNumUsed;
	/* find the last refcounted bucket (resolving IS_INDIRECT slots) */
	while (1) {
		end--;
		zv = &end->val;
		if (Z_TYPE_P(zv) == IS_INDIRECT) {
			zv = Z_INDIRECT_P(zv);
		}
		if (Z_REFCOUNTED_P(zv)) {
			break;
		}
		if (p == end) goto next;
	}
	while (p != end) {
		zv = &p->val;
		if (Z_TYPE_P(zv) == IS_INDIRECT) {
			zv = Z_INDIRECT_P(zv);
		}
		if (Z_REFCOUNTED_P(zv)) {
			ref = Z_COUNTED_P(zv);
			GC_ADDREF(ref);
			if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
				GC_REF_SET_BLACK(ref);
				GC_STACK_PUSH(ref);
			}
		}
		p++;
	}
	/* last bucket: continue with it directly (tail-call emulation) */
	zv = &p->val;
	if (Z_TYPE_P(zv) == IS_INDIRECT) {
		zv = Z_INDIRECT_P(zv);
	}
	ref = Z_COUNTED_P(zv);
	GC_ADDREF(ref);
	if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
		GC_REF_SET_BLACK(ref);
		goto tail_call;
	}

next:
	ref = GC_STACK_POP();
	if (ref) {
		goto tail_call;
	}
}
/* Mark-grey phase (paper: MarkGray): traverse the subgraph reachable
 * from "ref", decrementing each child's refcount once and coloring
 * every visited node grey.  Afterwards, any node whose refcount is
 * still > 0 must be referenced from outside the candidate cycle.
 * Structure mirrors gc_scan_black(): explicit stack, with the last
 * refcounted child of each node continued directly via the do/while. */
static void gc_mark_grey(zend_refcounted *ref, gc_stack *stack)
{
	HashTable *ht = NULL;
	Bucket *p, *end;
	zval *zv;
	GC_STACK_DCL(stack);

	do {
		GC_BENCH_INC(zval_marked_grey);
		if (GC_TYPE(ref) == IS_OBJECT) {
			zend_object *obj = (zend_object*)ref;
			if (EXPECTED(!(OBJ_FLAGS(ref) & IS_OBJ_FREE_CALLED))) {
				int n;
				zval *zv, *end;
				ht = obj->handlers->get_gc(obj, &zv, &n);
				if (UNEXPECTED(ht)) {
					GC_DELREF(ht);
					if (!GC_REF_CHECK_COLOR(ht, GC_GREY)) {
						GC_REF_SET_COLOR(ht, GC_GREY);
					} else {
						ht = NULL; /* already grey: don't traverse again */
					}
				}
				if (EXPECTED(!ht)) {
					if (!n) goto next;
					end = zv + n;
					/* trim trailing non-refcounted slots */
					while (!Z_REFCOUNTED_P(--end)) {
						if (zv == end) goto next;
					}
				} else {
					if (!n) goto handle_ht;
					end = zv + n;
				}
				while (zv != end) {
					if (Z_REFCOUNTED_P(zv)) {
						ref = Z_COUNTED_P(zv);
						GC_DELREF(ref);
						if (!GC_REF_CHECK_COLOR(ref, GC_GREY)) {
							GC_REF_SET_COLOR(ref, GC_GREY);
							GC_STACK_PUSH(ref);
						}
					}
					zv++;
				}
				if (EXPECTED(!ht)) {
					/* last refcounted child: continue with it directly */
					ref = Z_COUNTED_P(zv);
					GC_DELREF(ref);
					if (!GC_REF_CHECK_COLOR(ref, GC_GREY)) {
						GC_REF_SET_COLOR(ref, GC_GREY);
						continue;
					}
					goto next;
				}
			} else {
				goto next;
			}
		} else if (GC_TYPE(ref) == IS_ARRAY) {
			ZEND_ASSERT(((zend_array*)ref) != &EG(symbol_table));
			ht = (zend_array*)ref;
		} else if (GC_TYPE(ref) == IS_REFERENCE) {
			if (Z_REFCOUNTED(((zend_reference*)ref)->val)) {
				ref = Z_COUNTED(((zend_reference*)ref)->val);
				GC_DELREF(ref);
				if (!GC_REF_CHECK_COLOR(ref, GC_GREY)) {
					GC_REF_SET_COLOR(ref, GC_GREY);
					continue;
				}
			}
			goto next;
		} else {
			goto next;
		}

handle_ht:
		if (!ht->nNumUsed) goto next;
		p = ht->arData;
		end = p + ht->nNumUsed;
		/* find the last refcounted bucket (resolving IS_INDIRECT) */
		while (1) {
			end--;
			zv = &end->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				break;
			}
			if (p == end) goto next;
		}
		while (p != end) {
			zv = &p->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				ref = Z_COUNTED_P(zv);
				GC_DELREF(ref);
				if (!GC_REF_CHECK_COLOR(ref, GC_GREY)) {
					GC_REF_SET_COLOR(ref, GC_GREY);
					GC_STACK_PUSH(ref);
				}
			}
			p++;
		}
		/* last bucket: continue with it directly */
		zv = &p->val;
		if (Z_TYPE_P(zv) == IS_INDIRECT) {
			zv = Z_INDIRECT_P(zv);
		}
		ref = Z_COUNTED_P(zv);
		GC_DELREF(ref);
		if (!GC_REF_CHECK_COLOR(ref, GC_GREY)) {
			GC_REF_SET_COLOR(ref, GC_GREY);
			continue;
		}
next:
		ref = GC_STACK_POP();
	} while (ref);
}
/* Two-Finger compaction algorithm: the "free" finger scans forward for
 * unused slots while the "scan" finger scans backward for live entries;
 * live entries are moved into the unused slots until the fingers meet.
 * Afterwards all GC_G(num_roots) entries are contiguous at the start of
 * the root buffer and the free list is discarded. */
static void gc_compact(void)
{
	/* Already compact if the used region exactly holds num_roots entries. */
	if (GC_G(num_roots) + GC_FIRST_ROOT != GC_G(first_unused)) {
		if (GC_G(num_roots)) {
			gc_root_buffer *free = GC_IDX2PTR(GC_FIRST_ROOT);
			gc_root_buffer *scan = GC_IDX2PTR(GC_G(first_unused) - 1);
			gc_root_buffer *end = GC_IDX2PTR(GC_G(num_roots));
			uint32_t idx;
			zend_refcounted *p;

			while (free < scan) {
				while (!GC_IS_UNUSED(free->ref)) {
					free++;
				}
				while (GC_IS_UNUSED(scan->ref)) {
					scan--;
				}
				if (scan > free) {
					p = scan->ref;
					free->ref = p;
					/* Update the node's stored (compressed) buffer index,
					 * preserving its current color bits. */
					p = GC_GET_PTR(p);
					idx = gc_compress(GC_PTR2IDX(free));
					GC_REF_SET_INFO(p, idx | GC_REF_COLOR(p));
					free++;
					scan--;
					if (scan <= end) {
						break;
					}
				}
			}
		}
		/* Roots are now packed: reset the unused-slot free list and the
		 * first-unused watermark. */
		GC_G(unused) = GC_INVALID;
		GC_G(first_unused) = GC_G(num_roots) + GC_FIRST_ROOT;
	}
}
  859. static void gc_mark_roots(gc_stack *stack)
  860. {
  861. gc_root_buffer *current, *last;
  862. gc_compact();
  863. current = GC_IDX2PTR(GC_FIRST_ROOT);
  864. last = GC_IDX2PTR(GC_G(first_unused));
  865. while (current != last) {
  866. if (GC_IS_ROOT(current->ref)) {
  867. if (GC_REF_CHECK_COLOR(current->ref, GC_PURPLE)) {
  868. GC_REF_SET_COLOR(current->ref, GC_GREY);
  869. gc_mark_grey(current->ref, stack);
  870. }
  871. }
  872. current++;
  873. }
  874. }
/* Scan phase: decide the fate of every node that gc_mark_grey() colored
 * grey.  A node with refcount > 0 is externally reachable, so it and its
 * whole subgraph are recolored black and the trial-deleted refcounts are
 * restored by gc_scan_black().  Otherwise the node stays white (garbage
 * candidate) and its children are scanned in turn.  Iterative traversal
 * via gc_stack plus a tail_call label for the last child. */
static void gc_scan(zend_refcounted *ref, gc_stack *stack)
{
	Bucket *p, *end;
	zval *zv;
	GC_STACK_DCL(stack);

tail_call:
	/* Callers color a node white before dispatching here; anything else
	 * was already decided. */
	if (!GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
		goto next;
	}
	if (GC_REFCOUNT(ref) > 0) {
		/* Externally reachable: restore refcounts in the subgraph. */
		if (!GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
			GC_REF_SET_BLACK(ref);
			if (UNEXPECTED(!_stack->next)) {
				gc_stack_next(_stack);
			}
			/* Split stack and reuse the tail */
			_stack->next->prev = NULL;
			gc_scan_black(ref, _stack->next);
			_stack->next->prev = _stack;
		}
		goto next;
	}
	if (GC_TYPE(ref) == IS_OBJECT) {
		zend_object *obj = (zend_object*)ref;
		if (EXPECTED(!(OBJ_FLAGS(ref) & IS_OBJ_FREE_CALLED))) {
			int n;
			zval *zv, *end;
			HashTable *ht = obj->handlers->get_gc(obj, &zv, &n);
			if (UNEXPECTED(ht)) {
				/* Extra table from get_gc(): queue it for scanning. */
				if (GC_REF_CHECK_COLOR(ht, GC_GREY)) {
					GC_REF_SET_COLOR(ht, GC_WHITE);
					GC_STACK_PUSH((zend_refcounted *) ht);
				}
			}
			if (!n) goto next;
			/* Trim trailing non-refcounted zvals; the last refcounted
			 * child is handled via tail_call instead of a push. */
			end = zv + n;
			while (!Z_REFCOUNTED_P(--end)) {
				if (zv == end) goto next;
			}
			while (zv != end) {
				if (Z_REFCOUNTED_P(zv)) {
					ref = Z_COUNTED_P(zv);
					if (GC_REF_CHECK_COLOR(ref, GC_GREY)) {
						GC_REF_SET_COLOR(ref, GC_WHITE);
						GC_STACK_PUSH(ref);
					}
				}
				zv++;
			}
			ref = Z_COUNTED_P(zv);
			if (GC_REF_CHECK_COLOR(ref, GC_GREY)) {
				GC_REF_SET_COLOR(ref, GC_WHITE);
				goto tail_call;
			}
		}
	} else if (GC_TYPE(ref) == IS_ARRAY) {
		HashTable *ht = (HashTable *)ref;
		ZEND_ASSERT(ht != &EG(symbol_table));
		if (!ht->nNumUsed) goto next;
		p = ht->arData;
		end = p + ht->nNumUsed;
		/* Find the last refcounted element (IS_INDIRECT dereferenced). */
		while (1) {
			end--;
			zv = &end->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				break;
			}
			if (p == end) goto next;
		}
		while (p != end) {
			zv = &p->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				ref = Z_COUNTED_P(zv);
				if (GC_REF_CHECK_COLOR(ref, GC_GREY)) {
					GC_REF_SET_COLOR(ref, GC_WHITE);
					GC_STACK_PUSH(ref);
				}
			}
			p++;
		}
		/* Last bucket continues via tail_call, avoiding a push. */
		zv = &p->val;
		if (Z_TYPE_P(zv) == IS_INDIRECT) {
			zv = Z_INDIRECT_P(zv);
		}
		ref = Z_COUNTED_P(zv);
		if (GC_REF_CHECK_COLOR(ref, GC_GREY)) {
			GC_REF_SET_COLOR(ref, GC_WHITE);
			goto tail_call;
		}
	} else if (GC_TYPE(ref) == IS_REFERENCE) {
		if (Z_REFCOUNTED(((zend_reference*)ref)->val)) {
			ref = Z_COUNTED(((zend_reference*)ref)->val);
			if (GC_REF_CHECK_COLOR(ref, GC_GREY)) {
				GC_REF_SET_COLOR(ref, GC_WHITE);
				goto tail_call;
			}
		}
	}
next:
	ref = GC_STACK_POP();
	if (ref) {
		goto tail_call;
	}
}
  985. static void gc_scan_roots(gc_stack *stack)
  986. {
  987. gc_root_buffer *current = GC_IDX2PTR(GC_FIRST_ROOT);
  988. gc_root_buffer *last = GC_IDX2PTR(GC_G(first_unused));
  989. while (current != last) {
  990. if (GC_IS_ROOT(current->ref)) {
  991. if (GC_REF_CHECK_COLOR(current->ref, GC_GREY)) {
  992. GC_REF_SET_COLOR(current->ref, GC_WHITE);
  993. gc_scan(current->ref, stack);
  994. }
  995. }
  996. current++;
  997. }
  998. }
  999. static void gc_add_garbage(zend_refcounted *ref)
  1000. {
  1001. uint32_t idx;
  1002. gc_root_buffer *buf;
  1003. if (GC_HAS_UNUSED()) {
  1004. idx = GC_FETCH_UNUSED();
  1005. } else if (GC_HAS_NEXT_UNUSED()) {
  1006. idx = GC_FETCH_NEXT_UNUSED();
  1007. } else {
  1008. gc_grow_root_buffer();
  1009. if (UNEXPECTED(!GC_HAS_NEXT_UNUSED())) {
  1010. return;
  1011. }
  1012. idx = GC_FETCH_NEXT_UNUSED();
  1013. }
  1014. buf = GC_IDX2PTR(idx);
  1015. buf->ref = GC_MAKE_GARBAGE(ref);
  1016. idx = gc_compress(idx);
  1017. GC_REF_SET_INFO(ref, idx | GC_BLACK);
  1018. GC_G(num_roots)++;
  1019. }
/* Collect-white phase: walk the white (garbage) subgraph rooted at `ref`,
 * restoring the refcounts that gc_mark_grey() trial-deleted (GC_ADDREF),
 * recoloring each node black, and registering nodes that are not yet in
 * the root buffer (GC_INFO == 0) via gc_add_garbage().  Sets
 * GC_HAS_DESTRUCTORS in *flags when any garbage object still needs its
 * destructor invoked.  Returns the number of garbage zvals counted. */
static int gc_collect_white(zend_refcounted *ref, uint32_t *flags, gc_stack *stack)
{
	int count = 0;
	HashTable *ht = NULL;
	Bucket *p, *end;
	zval *zv;
	GC_STACK_DCL(stack);

	do {
		/* don't count references for compatibility ??? */
		if (GC_TYPE(ref) != IS_REFERENCE) {
			count++;
		}
		if (GC_TYPE(ref) == IS_OBJECT) {
			zend_object *obj = (zend_object*)ref;

			if (EXPECTED(!(OBJ_FLAGS(ref) & IS_OBJ_FREE_CALLED))) {
				int n;
				zval *zv, *end;

				/* optimization: color is GC_BLACK (0) */
				if (!GC_INFO(ref)) {
					gc_add_garbage(ref);
				}
				/* An uncalled, non-trivial destructor forces the dtor
				 * pass (and an automatic GC re-run) later on. */
				if (!(OBJ_FLAGS(obj) & IS_OBJ_DESTRUCTOR_CALLED)
				 && (obj->handlers->dtor_obj != zend_objects_destroy_object
				  || obj->ce->destructor != NULL)) {
					*flags |= GC_HAS_DESTRUCTORS;
				}
				ht = obj->handlers->get_gc(obj, &zv, &n);
				if (UNEXPECTED(ht)) {
					/* Restore the table's trial-deleted refcount and keep
					 * it for handle_ht unless it was already processed. */
					GC_ADDREF(ht);
					if (GC_REF_CHECK_COLOR(ht, GC_WHITE)) {
						GC_REF_SET_BLACK(ht);
					} else {
						ht = NULL;
					}
				}
				if (EXPECTED(!ht)) {
					if (!n) goto next;
					/* Trim trailing non-refcounted zvals so the last
					 * child continues the loop without a push. */
					end = zv + n;
					while (!Z_REFCOUNTED_P(--end)) {
						if (zv == end) goto next;
					}
				} else {
					if (!n) goto handle_ht;
					end = zv + n;
				}
				while (zv != end) {
					if (Z_REFCOUNTED_P(zv)) {
						ref = Z_COUNTED_P(zv);
						GC_ADDREF(ref);
						if (GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
							GC_REF_SET_BLACK(ref);
							GC_STACK_PUSH(ref);
						}
					}
					zv++;
				}
				if (EXPECTED(!ht)) {
					ref = Z_COUNTED_P(zv);
					GC_ADDREF(ref);
					if (GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
						GC_REF_SET_BLACK(ref);
						continue;
					}
					goto next;
				}
			} else {
				goto next;
			}
		} else if (GC_TYPE(ref) == IS_ARRAY) {
			/* optimization: color is GC_BLACK (0) */
			if (!GC_INFO(ref)) {
				gc_add_garbage(ref);
			}
			ht = (zend_array*)ref;
		} else if (GC_TYPE(ref) == IS_REFERENCE) {
			if (Z_REFCOUNTED(((zend_reference*)ref)->val)) {
				ref = Z_COUNTED(((zend_reference*)ref)->val);
				GC_ADDREF(ref);
				if (GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
					GC_REF_SET_BLACK(ref);
					continue;
				}
			}
			goto next;
		} else {
			goto next;
		}

handle_ht:
		/* Shared hash-table walk for arrays and get_gc() tables. */
		if (!ht->nNumUsed) goto next;
		p = ht->arData;
		end = p + ht->nNumUsed;
		/* Find the last refcounted element (IS_INDIRECT dereferenced). */
		while (1) {
			end--;
			zv = &end->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				break;
			}
			if (p == end) goto next;
		}
		while (p != end) {
			zv = &p->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				ref = Z_COUNTED_P(zv);
				GC_ADDREF(ref);
				if (GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
					GC_REF_SET_BLACK(ref);
					GC_STACK_PUSH(ref);
				}
			}
			p++;
		}
		/* Last bucket continues the outer loop directly. */
		zv = &p->val;
		if (Z_TYPE_P(zv) == IS_INDIRECT) {
			zv = Z_INDIRECT_P(zv);
		}
		ref = Z_COUNTED_P(zv);
		GC_ADDREF(ref);
		if (GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
			GC_REF_SET_BLACK(ref);
			continue;
		}
next:
		ref = GC_STACK_POP();
	} while (ref);
	return count;
}
/* Final root-collection step: drop every root the scan phase proved live
 * (black) from the buffer, compact it, then turn the surviving entries
 * into garbage entries and run gc_collect_white() on each white one.
 * Returns the total number of garbage zvals found. */
static int gc_collect_roots(uint32_t *flags, gc_stack *stack)
{
	uint32_t idx, end;
	zend_refcounted *ref;
	int count = 0;
	gc_root_buffer *current = GC_IDX2PTR(GC_FIRST_ROOT);
	gc_root_buffer *last = GC_IDX2PTR(GC_G(first_unused));

	/* remove non-garbage from the list */
	while (current != last) {
		if (GC_IS_ROOT(current->ref)) {
			if (GC_REF_CHECK_COLOR(current->ref, GC_BLACK)) {
				GC_REF_SET_INFO(current->ref, 0); /* reset GC_ADDRESS() and keep GC_BLACK */
				gc_remove_from_roots(current);
			}
		}
		current++;
	}

	gc_compact();

	/* Root buffer might be reallocated during gc_collect_white,
	 * make sure to reload pointers. */
	idx = GC_FIRST_ROOT;
	end = GC_G(first_unused);
	while (idx != end) {
		current = GC_IDX2PTR(idx);
		ref = current->ref;
		ZEND_ASSERT(GC_IS_ROOT(ref));
		/* Tag the slot as garbage for the destruction passes. */
		current->ref = GC_MAKE_GARBAGE(ref);
		if (GC_REF_CHECK_COLOR(ref, GC_WHITE)) {
			GC_REF_SET_BLACK(ref);
			count += gc_collect_white(ref, flags, stack);
		}
		idx++;
	}

	return count;
}
/* Before destructors run, remove from the root buffer every value nested
 * inside the garbage node `ref` (whose own entry, `root`, is counted but
 * kept — it has been colored purple by the caller).  This prevents nested
 * data from being freed while a destructor might still resurrect it.
 * Returns the number of buffer entries accounted for/removed. */
static int gc_remove_nested_data_from_buffer(zend_refcounted *ref, gc_root_buffer *root, gc_stack *stack)
{
	HashTable *ht = NULL;
	Bucket *p, *end;
	zval *zv;
	int count = 0;
	GC_STACK_DCL(stack);

	do {
		if (root) {
			/* First iteration only: the top-level entry stays in place. */
			root = NULL;
			count++;
		} else if (GC_REF_ADDRESS(ref) != 0
		 && GC_REF_CHECK_COLOR(ref, GC_BLACK)) {
			/* Nested node currently held in the buffer: evict it.
			 * (Purple nodes — dtor garbage — are deliberately skipped.) */
			GC_TRACE_REF(ref, "removing from buffer");
			GC_REMOVE_FROM_BUFFER(ref);
			count++;
		} else if (GC_TYPE(ref) == IS_REFERENCE) {
			if (Z_REFCOUNTED(((zend_reference*)ref)->val)) {
				ref = Z_COUNTED(((zend_reference*)ref)->val);
				continue;
			}
			goto next;
		} else {
			goto next;
		}

		/* Descend into the node's children. */
		if (GC_TYPE(ref) == IS_OBJECT) {
			zend_object *obj = (zend_object*)ref;

			if (EXPECTED(!(OBJ_FLAGS(ref) & IS_OBJ_FREE_CALLED))) {
				int n;
				zval *zv, *end;

				ht = obj->handlers->get_gc(obj, &zv, &n);
				if (EXPECTED(!ht)) {
					if (!n) goto next;
					/* Trim trailing non-refcounted zvals. */
					end = zv + n;
					while (!Z_REFCOUNTED_P(--end)) {
						if (zv == end) goto next;
					}
				} else {
					if (!n) goto handle_ht;
					end = zv + n;
				}
				while (zv != end) {
					if (Z_REFCOUNTED_P(zv)) {
						ref = Z_COUNTED_P(zv);
						GC_STACK_PUSH(ref);
					}
					zv++;
				}
				if (EXPECTED(!ht)) {
					ref = Z_COUNTED_P(zv);
					continue;
				}
handle_ht:
				/* The get_gc() table itself may also sit in the buffer. */
				if (GC_REF_ADDRESS(ht) != 0 && GC_REF_CHECK_COLOR(ht, GC_BLACK)) {
					GC_TRACE_REF(ht, "removing from buffer");
					GC_REMOVE_FROM_BUFFER(ht);
				}
			} else {
				goto next;
			}
		} else if (GC_TYPE(ref) == IS_ARRAY) {
			ht = (zend_array*)ref;
		} else {
			goto next;
		}

		/* Shared hash-table walk (arrays and get_gc() tables). */
		if (!ht->nNumUsed) goto next;
		p = ht->arData;
		end = p + ht->nNumUsed;
		while (1) {
			end--;
			zv = &end->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				break;
			}
			if (p == end) goto next;
		}
		while (p != end) {
			zv = &p->val;
			if (Z_TYPE_P(zv) == IS_INDIRECT) {
				zv = Z_INDIRECT_P(zv);
			}
			if (Z_REFCOUNTED_P(zv)) {
				ref = Z_COUNTED_P(zv);
				GC_STACK_PUSH(ref);
			}
			p++;
		}
		/* Last bucket continues the outer loop directly. */
		zv = &p->val;
		if (Z_TYPE_P(zv) == IS_INDIRECT) {
			zv = Z_INDIRECT_P(zv);
		}
		ref = Z_COUNTED_P(zv);
		continue;
next:
		ref = GC_STACK_POP();
	} while (ref);
	return count;
}
  1288. static void zend_get_gc_buffer_release(void);
  1289. static void zend_gc_root_tmpvars(void);
/* Run a full cycle collection over the root buffer.  Phases: mark candidate
 * roots grey (trial-deleting refcounts), scan to recolor live data black and
 * garbage white, collect the white subgraphs, call any pending object
 * destructors (which forces one automatic re-run of the whole collection),
 * then destroy zvals and free the remaining garbage.  Returns the number of
 * collected zvals. */
ZEND_API int zend_gc_collect_cycles(void)
{
	int total_count = 0;
	bool should_rerun_gc = 0;
	bool did_rerun_gc = 0;

rerun_gc:
	if (GC_G(num_roots)) {
		int count;
		gc_root_buffer *current, *last;
		zend_refcounted *p;
		uint32_t gc_flags = 0;
		uint32_t idx, end;
		gc_stack stack;

		stack.prev = NULL;
		stack.next = NULL;

		/* Do not re-enter a collection that is already in progress. */
		if (GC_G(gc_active)) {
			return 0;
		}

		GC_TRACE("Collecting cycles");
		GC_G(gc_runs)++;
		GC_G(gc_active) = 1;

		GC_TRACE("Marking roots");
		gc_mark_roots(&stack);
		GC_TRACE("Scanning roots");
		gc_scan_roots(&stack);

		GC_TRACE("Collecting roots");
		count = gc_collect_roots(&gc_flags, &stack);

		if (!GC_G(num_roots)) {
			/* nothing to free */
			GC_TRACE("Nothing to free");
			gc_stack_free(&stack);
			GC_G(gc_active) = 0;
			goto finish;
		}

		zend_fiber_switch_block();

		/* Entries appended past `end` (e.g. by destructors) belong to the
		 * next run and are not touched below. */
		end = GC_G(first_unused);

		if (gc_flags & GC_HAS_DESTRUCTORS) {
			GC_TRACE("Calling destructors");

			/* During a destructor call, new externally visible references to nested data may
			 * be introduced. These references can be introduced in a way that does not
			 * modify any refcounts, so we have no real way to detect this situation
			 * short of rerunning full GC tracing. What we do instead is to only run
			 * destructors at this point and automatically re-run GC afterwards. */
			should_rerun_gc = 1;

			/* Mark all roots for which a dtor will be invoked as DTOR_GARBAGE. Additionally
			 * color them purple. This serves a double purpose: First, they should be
			 * considered new potential roots for the next GC run. Second, it will prevent
			 * their removal from the root buffer by nested data removal. */
			idx = GC_FIRST_ROOT;
			current = GC_IDX2PTR(GC_FIRST_ROOT);
			while (idx != end) {
				if (GC_IS_GARBAGE(current->ref)) {
					p = GC_GET_PTR(current->ref);
					if (GC_TYPE(p) == IS_OBJECT && !(OBJ_FLAGS(p) & IS_OBJ_DESTRUCTOR_CALLED)) {
						zend_object *obj = (zend_object *) p;
						if (obj->handlers->dtor_obj != zend_objects_destroy_object
						 || obj->ce->destructor) {
							current->ref = GC_MAKE_DTOR_GARBAGE(obj);
							GC_REF_SET_COLOR(obj, GC_PURPLE);
						} else {
							/* Trivial destructor: just mark it as called. */
							GC_ADD_FLAGS(obj, IS_OBJ_DESTRUCTOR_CALLED);
						}
					}
				}
				current++;
				idx++;
			}

			/* Remove nested data for objects on which a destructor will be called.
			 * This will not remove the objects themselves, as they have been colored
			 * purple. */
			idx = GC_FIRST_ROOT;
			current = GC_IDX2PTR(GC_FIRST_ROOT);
			while (idx != end) {
				if (GC_IS_DTOR_GARBAGE(current->ref)) {
					p = GC_GET_PTR(current->ref);
					count -= gc_remove_nested_data_from_buffer(p, current, &stack);
				}
				current++;
				idx++;
			}

			/* Actually call destructors.
			 *
			 * The root buffer might be reallocated during destructors calls,
			 * make sure to reload pointers as necessary. */
			idx = GC_FIRST_ROOT;
			while (idx != end) {
				current = GC_IDX2PTR(idx);
				if (GC_IS_DTOR_GARBAGE(current->ref)) {
					p = GC_GET_PTR(current->ref);
					/* Mark this is as a normal root for the next GC run,
					 * it's no longer garbage for this run. */
					current->ref = p;
					/* Double check that the destructor hasn't been called yet. It could have
					 * already been invoked indirectly by some other destructor. */
					if (!(OBJ_FLAGS(p) & IS_OBJ_DESTRUCTOR_CALLED)) {
						zend_object *obj = (zend_object*)p;
						GC_TRACE_REF(obj, "calling destructor");
						GC_ADD_FLAGS(obj, IS_OBJ_DESTRUCTOR_CALLED);
						/* Hold an extra reference across the dtor call so the
						 * object is not freed mid-destructor. */
						GC_ADDREF(obj);
						obj->handlers->dtor_obj(obj);
						GC_DELREF(obj);
					}
				}
				idx++;
			}

			if (GC_G(gc_protected)) {
				/* something went wrong */
				zend_get_gc_buffer_release();
				zend_fiber_switch_unblock();
				return 0;
			}
		}

		gc_stack_free(&stack);

		/* Destroy zvals. The root buffer may be reallocated. */
		GC_TRACE("Destroying zvals");
		idx = GC_FIRST_ROOT;
		while (idx != end) {
			current = GC_IDX2PTR(idx);
			if (GC_IS_GARBAGE(current->ref)) {
				p = GC_GET_PTR(current->ref);
				GC_TRACE_REF(p, "destroying");
				if (GC_TYPE(p) == IS_OBJECT) {
					zend_object *obj = (zend_object*)p;

					EG(objects_store).object_buckets[obj->handle] = SET_OBJ_INVALID(obj);
					GC_TYPE_INFO(obj) = GC_NULL |
						(GC_TYPE_INFO(obj) & ~GC_TYPE_MASK);
					/* Modify current before calling free_obj (bug #78811: free_obj() can cause the root buffer (with current) to be reallocated.) */
					current->ref = GC_MAKE_GARBAGE(((char*)obj) - obj->handlers->offset);
					if (!(OBJ_FLAGS(obj) & IS_OBJ_FREE_CALLED)) {
						GC_ADD_FLAGS(obj, IS_OBJ_FREE_CALLED);
						GC_ADDREF(obj);
						obj->handlers->free_obj(obj);
						GC_DELREF(obj);
					}

					ZEND_OBJECTS_STORE_ADD_TO_FREE_LIST(obj->handle);
				} else if (GC_TYPE(p) == IS_ARRAY) {
					zend_array *arr = (zend_array*)p;

					GC_TYPE_INFO(arr) = GC_NULL |
						(GC_TYPE_INFO(arr) & ~GC_TYPE_MASK);

					/* GC may destroy arrays with rc>1. This is valid and safe. */
					HT_ALLOW_COW_VIOLATION(arr);

					zend_hash_destroy(arr);
				}
			}
			idx++;
		}

		/* Free objects */
		current = GC_IDX2PTR(GC_FIRST_ROOT);
		last = GC_IDX2PTR(end);
		while (current != last) {
			if (GC_IS_GARBAGE(current->ref)) {
				p = GC_GET_PTR(current->ref);
				GC_LINK_UNUSED(current);
				GC_G(num_roots)--;
				efree(p);
			}
			current++;
		}

		zend_fiber_switch_unblock();
		GC_TRACE("Collection finished");
		GC_G(collected) += count;
		total_count += count;
		GC_G(gc_active) = 0;
	}

	gc_compact();

	/* Objects with destructors were removed from this GC run. Rerun GC right away to clean them
	 * up. We do this only once: If we encounter more destructors on the second run, we'll not
	 * run GC another time. */
	if (should_rerun_gc && !did_rerun_gc) {
		did_rerun_gc = 1;
		goto rerun_gc;
	}

finish:
	zend_get_gc_buffer_release();
	zend_gc_root_tmpvars();
	return total_count;
}
  1467. ZEND_API void zend_gc_get_status(zend_gc_status *status)
  1468. {
  1469. status->runs = GC_G(gc_runs);
  1470. status->collected = GC_G(collected);
  1471. status->threshold = GC_G(gc_threshold);
  1472. status->num_roots = GC_G(num_roots);
  1473. }
  1474. ZEND_API zend_get_gc_buffer *zend_get_gc_buffer_create(void) {
  1475. /* There can only be one get_gc() call active at a time,
  1476. * so there only needs to be one buffer. */
  1477. zend_get_gc_buffer *gc_buffer = &EG(get_gc_buffer);
  1478. gc_buffer->cur = gc_buffer->start;
  1479. return gc_buffer;
  1480. }
  1481. ZEND_API void zend_get_gc_buffer_grow(zend_get_gc_buffer *gc_buffer) {
  1482. size_t old_capacity = gc_buffer->end - gc_buffer->start;
  1483. size_t new_capacity = old_capacity == 0 ? 64 : old_capacity * 2;
  1484. gc_buffer->start = erealloc(gc_buffer->start, new_capacity * sizeof(zval));
  1485. gc_buffer->end = gc_buffer->start + new_capacity;
  1486. gc_buffer->cur = gc_buffer->start + old_capacity;
  1487. }
  1488. static void zend_get_gc_buffer_release() {
  1489. zend_get_gc_buffer *gc_buffer = &EG(get_gc_buffer);
  1490. efree(gc_buffer->start);
  1491. gc_buffer->start = gc_buffer->end = gc_buffer->cur = NULL;
  1492. }
/* TMPVAR operands are destroyed using zval_ptr_dtor_nogc(), because they usually cannot contain
 * cycles. However, there are some rare exceptions where this is possible, in which case we rely
 * on the producing code to root the value. If a GC run occurs between the rooting and consumption
 * of the value, we would end up leaking it. To avoid this, root all live TMPVAR values here. */
static void zend_gc_root_tmpvars(void) {
	zend_execute_data *ex = EG(current_execute_data);
	for (; ex; ex = ex->prev_execute_data) {
		zend_function *func = ex->func;
		/* Only userland frames carry op_array live-range metadata. */
		if (!func || !ZEND_USER_CODE(func->type)) {
			continue;
		}

		uint32_t op_num = ex->opline - ex->func->op_array.opcodes;
		for (uint32_t i = 0; i < func->op_array.last_live_range; i++) {
			const zend_live_range *range = &func->op_array.live_range[i];
			/* Ranges are ordered by start; no later range can cover op_num. */
			if (range->start > op_num) {
				break;
			}
			/* Range already ended: its variable is not live here. */
			if (range->end <= op_num) {
				continue;
			}

			uint32_t kind = range->var & ZEND_LIVE_MASK;
			if (kind == ZEND_LIVE_TMPVAR) {
				uint32_t var_num = range->var & ~ZEND_LIVE_MASK;
				zval *var = ZEND_CALL_VAR(ex, var_num);
				if (Z_REFCOUNTED_P(var)) {
					gc_check_possible_root(Z_COUNTED_P(var));
				}
			}
		}
	}
}
#ifdef ZTS
/* Size of the per-thread GC globals; used by TSRM to allocate them. */
size_t zend_gc_globals_size(void)
{
	return sizeof(zend_gc_globals);
}
#endif