zend_alloc.c 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
  1. /*
  2. +----------------------------------------------------------------------+
  3. | Zend Engine |
  4. +----------------------------------------------------------------------+
  5. | Copyright (c) 1998-2018 Zend Technologies Ltd. (http://www.zend.com) |
  6. +----------------------------------------------------------------------+
  7. | This source file is subject to version 2.00 of the Zend license, |
  8. | that is bundled with this package in the file LICENSE, and is |
  9. | available through the world-wide-web at the following url: |
  10. | http://www.zend.com/license/2_00.txt. |
  11. | If you did not receive a copy of the Zend license and are unable to |
  12. | obtain it through the world-wide-web, please send a note to |
  13. | license@zend.com so we can mail you a copy immediately. |
  14. +----------------------------------------------------------------------+
  15. | Authors: Andi Gutmans <andi@php.net> |
  16. | Zeev Suraski <zeev@php.net> |
  17. | Dmitry Stogov <dmitry@php.net> |
  18. +----------------------------------------------------------------------+
  19. */
  20. /*
  21. * zend_alloc is designed to be a modern CPU cache friendly memory manager
  22. * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
  23. *
  24. * All allocations are split into 3 categories:
  25. *
  26. * Huge - the size is greater than CHUNK size (~2M by default), allocation is
  27. * performed using mmap(). The result is aligned on 2M boundary.
  28. *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
  30. * are always aligned on page boundary.
  31. *
  32. * Small - less than 3/4 of page size. Small sizes are rounded up to nearest
  33. * greater predefined small size (there are 30 predefined sizes:
  34. * 8, 16, 24, 32, ... 3072). Small blocks are allocated from
  35. * RUNs. Each RUN is allocated as a single or few following pages.
  36. * Allocation inside RUNs implemented using linked list of free
  37. * elements. The result is aligned to 8 bytes.
  38. *
  39. * zend_alloc allocates memory from OS by CHUNKs, these CHUNKs and huge memory
  40. * blocks are always aligned to CHUNK boundary. So it's very easy to determine
  41. * the CHUNK owning the certain pointer. Regular CHUNKs reserve a single
  42. * page at start for special purpose. It contains bitset of free pages,
  43. * few bitset for available runs of predefined small sizes, map of pages that
  44. * keeps information about usage of each page in this CHUNK, etc.
  45. *
  46. * zend_alloc provides familiar emalloc/efree/erealloc API, but in addition it
  47. * provides specialized and optimized routines to allocate blocks of predefined
 * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc)
  49. * The library uses C preprocessor tricks that substitute calls to emalloc()
  50. * with more specialized routines when the requested size is known.
  51. */
  52. #include "zend.h"
  53. #include "zend_alloc.h"
  54. #include "zend_globals.h"
  55. #include "zend_operators.h"
  56. #include "zend_multiply.h"
  57. #include "zend_bitset.h"
  58. #ifdef HAVE_SIGNAL_H
  59. # include <signal.h>
  60. #endif
  61. #ifdef HAVE_UNISTD_H
  62. # include <unistd.h>
  63. #endif
  64. #ifdef ZEND_WIN32
  65. # include <wincrypt.h>
  66. # include <process.h>
  67. #endif
  68. #include <stdio.h>
  69. #include <stdlib.h>
  70. #include <string.h>
  71. #include <sys/types.h>
  72. #include <sys/stat.h>
  73. #if HAVE_LIMITS_H
  74. #include <limits.h>
  75. #endif
  76. #include <fcntl.h>
  77. #include <errno.h>
  78. #ifndef _WIN32
  79. # ifdef HAVE_MREMAP
  80. # ifndef _GNU_SOURCE
  81. # define _GNU_SOURCE
  82. # endif
  83. # ifndef __USE_GNU
  84. # define __USE_GNU
  85. # endif
  86. # endif
  87. # include <sys/mman.h>
  88. # ifndef MAP_ANON
  89. # ifdef MAP_ANONYMOUS
  90. # define MAP_ANON MAP_ANONYMOUS
  91. # endif
  92. # endif
  93. # ifndef MREMAP_MAYMOVE
  94. # define MREMAP_MAYMOVE 0
  95. # endif
  96. # ifndef MAP_FAILED
  97. # define MAP_FAILED ((void*)-1)
  98. # endif
  99. # ifndef MAP_POPULATE
  100. # define MAP_POPULATE 0
  101. # endif
  102. # if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
  103. # define REAL_PAGE_SIZE _real_page_size
  104. static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
  105. # endif
  106. #endif
  107. #ifndef REAL_PAGE_SIZE
  108. # define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
  109. #endif
  110. #ifndef ZEND_MM_STAT
  111. # define ZEND_MM_STAT 1 /* track current and peak memory usage */
  112. #endif
  113. #ifndef ZEND_MM_LIMIT
  114. # define ZEND_MM_LIMIT 1 /* support for user-defined memory limit */
  115. #endif
  116. #ifndef ZEND_MM_CUSTOM
  117. # define ZEND_MM_CUSTOM 1 /* support for custom memory allocator */
  118. /* USE_ZEND_ALLOC=0 may switch to system malloc() */
  119. #endif
  120. #ifndef ZEND_MM_STORAGE
  121. # define ZEND_MM_STORAGE 1 /* support for custom memory storage */
  122. #endif
  123. #ifndef ZEND_MM_ERROR
  124. # define ZEND_MM_ERROR 1 /* report system errors */
  125. #endif
  126. #ifndef ZEND_MM_CHECK
  127. # define ZEND_MM_CHECK(condition, message) do { \
  128. if (UNEXPECTED(!(condition))) { \
  129. zend_mm_panic(message); \
  130. } \
  131. } while (0)
  132. #endif
typedef uint32_t zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset;  /* 4-byte or 8-byte integer */

/* Offset of "size" past the previous "alignment" boundary (alignment must be a power of two). */
#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
	(((size_t)(size)) & ((alignment) - 1))
/* "size" rounded down to an "alignment" boundary. */
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
	(((size_t)(size)) & ~((alignment) - 1))
/* Number of "alignment"-sized units needed to hold "size" bytes (rounds up). */
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
	(((size_t)(size) + ((alignment) - 1)) / (alignment))

#define ZEND_MM_BITSET_LEN   (sizeof(zend_mm_bitset) * 8)         /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */

typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];    /* 64B */

/* Page-type tags held in the two high bits of a zend_mm_page_info. */
#define ZEND_MM_IS_FRUN 0x00000000
#define ZEND_MM_IS_LRUN 0x40000000
#define ZEND_MM_IS_SRUN 0x80000000

#define ZEND_MM_LRUN_PAGES_MASK 0x000003ff        /* 10 bits: page count of a large run */
#define ZEND_MM_LRUN_PAGES_OFFSET 0
#define ZEND_MM_SRUN_BIN_NUM_MASK 0x0000001f      /* 5 bits: bin number of a small run */
#define ZEND_MM_SRUN_BIN_NUM_OFFSET 0
#define ZEND_MM_SRUN_FREE_COUNTER_MASK 0x01ff0000 /* 9 bits: free-element counter of a small run */
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
#define ZEND_MM_NRUN_OFFSET_MASK 0x01ff0000       /* 9 bits: page offset inside a multi-page run */
#define ZEND_MM_NRUN_OFFSET_OFFSET 16

/* Field extractors for the packed page-info formats above. */
#define ZEND_MM_LRUN_PAGES(info) (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info) (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
#define ZEND_MM_SRUN_FREE_COUNTER(info) (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
#define ZEND_MM_NRUN_OFFSET(info) (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)

/* Constructors for the packed page-info formats. */
#define ZEND_MM_FRUN() ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count) (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
#define ZEND_MM_SRUN_EX(bin_num, count) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
/* NRUN: a non-first page of a small run, tagged with both SRUN and LRUN bits. */
#define ZEND_MM_NRUN(bin_num, offset) (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))

#define ZEND_MM_BINS 30 /* number of predefined small sizes (see zend_alloc_sizes.h) */

typedef struct  _zend_mm_page      zend_mm_page;
typedef struct  _zend_mm_bin       zend_mm_bin;
typedef struct  _zend_mm_free_slot zend_mm_free_slot;
typedef struct  _zend_mm_chunk     zend_mm_chunk;
typedef struct  _zend_mm_huge_list zend_mm_huge_list;

/* When non-zero, chunk-sized mmap()s first try MAP_HUGETLB (see zend_mm_mmap). */
int zend_mm_use_huge_pages = 0;
  171. /*
 * Memory is retrieved from the OS in chunks of fixed size 2MB.
 * Inside a chunk it's managed by pages of fixed size 4096B.
 * So each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
  176. * It contains service information about all pages.
  177. *
  178. * free_pages - current number of free pages in this chunk
  179. *
  180. * free_tail - number of continuous free pages at the end of chunk
  181. *
  182. * free_map - bitset (a bit for each page). The bit is set if the corresponding
 * page is allocated. Allocator for "large sizes" may easily find a
  184. * free page (or a continuous number of pages) searching for zero
  185. * bits.
  186. *
  187. * map - contains service information for each page. (32-bits for each
  188. * page).
  189. * usage:
  190. * (2 bits)
  191. * FRUN - free page,
  192. * LRUN - first page of "large" allocation
  193. * SRUN - first page of a bin used for "small" allocation
  194. *
  195. * lrun_pages:
  196. * (10 bits) number of allocated pages
  197. *
  198. * srun_bin_num:
  199. * (5 bits) bin number (e.g. 0 for sizes 0-2, 1 for 3-4,
  200. * 2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
  201. */
/* Heap descriptor; lives inside the heap_slot of the main chunk. */
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
	int                use_custom_heap; /* non-zero: route requests through custom_heap callbacks */
#endif
#if ZEND_MM_STORAGE
	zend_mm_storage   *storage;         /* pluggable chunk storage backend */
#endif
#if ZEND_MM_STAT
	size_t             size;            /* current memory usage */
	size_t             peak;            /* peak memory usage */
#endif
	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	size_t             real_size;       /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
	size_t             real_peak;       /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
	size_t             limit;           /* memory limit */
	int                overflow;        /* memory overflow flag */
#endif
	zend_mm_huge_list *huge_list;       /* list of huge allocated blocks */
	zend_mm_chunk     *main_chunk;      /* first chunk (holds this heap in its heap_slot) */
	zend_mm_chunk     *cached_chunks;   /* list of unused chunks */
	int                chunks_count;    /* number of allocated chunks */
	int                peak_chunks_count;   /* peak number of allocated chunks for current request */
	int                cached_chunks_count; /* number of cached chunks */
	double             avg_chunks_count;    /* average number of chunks allocated per request */
	int                last_chunks_delete_boundary; /* number of chunks after last deletion */
	int                last_chunks_delete_count;    /* number of deletions over the last boundary */
#if ZEND_MM_CUSTOM
	union {
		/* release-mode custom allocator entry points */
		struct {
			void *(*_malloc)(size_t);
			void  (*_free)(void*);
			void *(*_realloc)(void*, size_t);
		} std;
		/* debug-mode variants that also receive file/line information */
		struct {
			void *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void *(*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
		} debug;
	} custom_heap;
#endif
};
/* Header of a 2MB chunk; occupies the chunk's first (reserved) page. */
struct _zend_mm_chunk {
	zend_mm_heap      *heap;       /* owning heap */
	zend_mm_chunk     *next;       /* doubly-linked ring of chunks */
	zend_mm_chunk     *prev;
	uint32_t           free_pages; /* number of free pages */
	uint32_t           free_tail;  /* number of free pages at the end of chunk */
	uint32_t           num;        /* chunk ordinal within the heap */
	char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)]; /* pad header to 64 bytes */
	zend_mm_heap       heap_slot;  /* used only in main chunk */
	zend_mm_page_map   free_map;   /* 512 bits or 64 bytes */
	zend_mm_page_info  map[ZEND_MM_PAGES]; /* 2 KB = 512 * 4 */
};
/* A single 4KB page viewed as raw bytes. */
struct _zend_mm_page {
	char               bytes[ZEND_MM_PAGE_SIZE];
};

/*
 * bin - is one or few continuous pages (up to 8) used for allocation of
 * a particular "small size".
 */
struct _zend_mm_bin {
	char               bytes[ZEND_MM_PAGE_SIZE * 8];
};

/* Node of the intrusive free list threaded through unused small blocks. */
struct _zend_mm_free_slot {
	zend_mm_free_slot *next_free_slot;
};

/* Bookkeeping record for one huge (chunk-aligned, mmap()ed) allocation. */
struct _zend_mm_huge_list {
	void              *ptr;  /* start of the mapped region */
	size_t             size; /* size of the region in bytes */
	zend_mm_huge_list *next; /* singly-linked list of huge blocks */
#if ZEND_DEBUG
	zend_mm_debug_info dbg;  /* allocation-site info for debug builds */
#endif
};
/* Address of page "page_num" inside "chunk" (page 0 is the chunk header). */
#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
	((void*)(((zend_mm_page*)(chunk)) + (page_num)))

/* Per-bin tables expanded from ZEND_MM_BINS_INFO (zend_alloc_sizes.h):
 * element size, elements per run, and pages per run for each bin. */
#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const uint32_t bin_data_size[] = {
	ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};

#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const uint32_t bin_elements[] = {
	ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};

#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const uint32_t bin_pages[] = {
	ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};
  295. #if ZEND_DEBUG
  296. ZEND_COLD void zend_debug_alloc_output(char *format, ...)
  297. {
  298. char output_buf[256];
  299. va_list args;
  300. va_start(args, format);
  301. vsprintf(output_buf, format, args);
  302. va_end(args);
  303. #ifdef ZEND_WIN32
  304. OutputDebugString(output_buf);
  305. #else
  306. fprintf(stderr, "%s", output_buf);
  307. #endif
  308. }
  309. #endif
/* Print "message" to stderr and terminate the process; invoked when an
 * internal heap consistency check fails (see ZEND_MM_CHECK). */
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
	fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
	fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
	/* in debug builds raise SIGSEGV so a debugger / core dump captures state */
	kill(getpid(), SIGSEGV);
#endif
	exit(1);
}
/* Report an out-of-memory condition as a fatal error and bail out of the
 * request. "format" must contain conversions matching the trailing arguments:
 * (limit, [filename, lineno,] size). */
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
	const char *format,
	size_t limit,
#if ZEND_DEBUG
	const char *filename,
	uint32_t lineno,
#endif
	size_t size)
{
	/* NOTE(review): setting the overflow flag presumably lets allocations made
	 * while printing the error bypass the memory limit — confirm against the
	 * allocator's limit-check paths. */
	heap->overflow = 1;
	zend_try {
		zend_error_noreturn(E_ERROR,
			format,
			limit,
#if ZEND_DEBUG
			filename,
			lineno,
#endif
			size);
	} zend_catch {
	} zend_end_try();
	heap->overflow = 0;
	zend_bailout();
	/* not reached: zend_bailout() does not return; exit(1) satisfies noreturn */
	exit(1);
}
  347. #ifdef _WIN32
  348. void
  349. stderr_last_error(char *msg)
  350. {
  351. LPSTR buf = NULL;
  352. DWORD err = GetLastError();
  353. if (!FormatMessage(
  354. FORMAT_MESSAGE_ALLOCATE_BUFFER |
  355. FORMAT_MESSAGE_FROM_SYSTEM |
  356. FORMAT_MESSAGE_IGNORE_INSERTS,
  357. NULL,
  358. err,
  359. MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
  360. (LPSTR)&buf,
  361. 0, NULL)) {
  362. fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
  363. }
  364. else {
  365. fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
  366. }
  367. }
  368. #endif
  369. /*****************/
  370. /* OS Allocation */
  371. /*****************/
/* Try to map "size" bytes at exactly "addr". Returns NULL (rather than
 * replacing any existing mapping) when the address cannot be obtained. */
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	int flags = MAP_PRIVATE | MAP_ANON;
#if defined(MAP_EXCL)
	/* BSD: MAP_FIXED|MAP_EXCL fails instead of clobbering an existing mapping */
	flags |= MAP_FIXED | MAP_EXCL;
#endif
	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR && !defined(MAP_EXCL)
		/* with MAP_EXCL a failure here is the expected "address busy" case,
		 * so it is only reported when MAP_EXCL is unavailable */
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	} else if (ptr != addr) {
		/* the kernel placed the mapping elsewhere; undo it and report failure */
		if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		}
		return NULL;
	}
	return ptr;
#endif
}
/* Allocate "size" bytes of fresh address space from the OS at any address.
 * Returns NULL on failure. */
static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

	if (ptr == NULL) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualAlloc() failed");
#endif
		return NULL;
	}
	return ptr;
#else
	void *ptr;

#ifdef MAP_HUGETLB
	/* opportunistically try huge pages for whole chunks; silently fall back
	 * to a regular mapping if the kernel has none available */
	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (ptr != MAP_FAILED) {
			return ptr;
		}
	}
#endif

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	}
	return ptr;
#endif
}
/* Return "size" bytes at "addr" to the OS. Failures are only reported
 * (when ZEND_MM_ERROR), never propagated to the caller. */
static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualFree() failed");
#endif
	}
#else
	if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
	}
#endif
}
  446. /***********/
  447. /* Bitmask */
  448. /***********/
  449. /* number of trailing set (1) bits */
/* number of trailing set (1) bits */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
	/* trailing ones of x == trailing zeros of ~x */
	return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
	return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, ~bitset)) {
#else
	if (!BitScanForward(&index, ~bitset)) {
#endif
		/* undefined behavior */
		return 32;
	}

	return (int)index;
#else
	/* portable fallback: binary search, narrowing by halves of the word */
	int n;

	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

	n = 0;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
	return n + (bitset & 1);
#endif
}
  483. static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
  484. {
  485. int i = 0;
  486. do {
  487. zend_mm_bitset tmp = bitset[i];
  488. if (tmp != (zend_mm_bitset)-1) {
  489. return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
  490. }
  491. i++;
  492. } while (i < size);
  493. return -1;
  494. }
  495. static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
  496. {
  497. int i = 0;
  498. do {
  499. zend_mm_bitset tmp = bitset[i];
  500. if (tmp != 0) {
  501. return i * ZEND_MM_BITSET_LEN + zend_ulong_ntz(tmp);
  502. }
  503. i++;
  504. } while (i < size);
  505. return -1;
  506. }
  507. static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
  508. {
  509. int i = 0;
  510. do {
  511. zend_mm_bitset tmp = bitset[i];
  512. if (tmp != (zend_mm_bitset)-1) {
  513. int n = zend_mm_bitset_nts(tmp);
  514. bitset[i] |= Z_UL(1) << n;
  515. return i * ZEND_MM_BITSET_LEN + n;
  516. }
  517. i++;
  518. } while (i < size);
  519. return -1;
  520. }
/* Return non-zero if "bit" is set in "bitset". */
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
	return ZEND_BIT_TEST(bitset, bit);
}
  525. static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
  526. {
  527. bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
  528. }
  529. static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
  530. {
  531. bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
  532. }
/* Set "len" consecutive bits starting at "start" in "bitset" (len >= 1). */
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_set_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;           /* word holding the first bit */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* word holding the last bit */
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* range spans several words: head word, full middle words, tail word */
			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			bitset[pos++] |= tmp;
			while (pos != end) {
				/* set all bits */
				bitset[pos++] = (zend_mm_bitset)-1;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		} else {
			/* range fits inside a single word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		}
	}
}
/* Clear "len" consecutive bits starting at "start" in "bitset" (len >= 1). */
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_reset_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;             /* word holding the first bit */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* word holding the last bit */
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* range spans several words: head word, full middle words, tail word */
			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = ~((Z_L(1) << bit) - 1);
			bitset[pos++] &= ~tmp;
			while (pos != end) {
				/* reset all bits */
				bitset[pos++] = 0;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		} else {
			/* range fits inside a single word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		}
	}
}
/* Return non-zero if all "len" bits starting at "start" are clear
 * (i.e. the corresponding page range is free). Builds the same masks as
 * zend_mm_bitset_set_range but only tests against them. */
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		return !zend_mm_bitset_is_set(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;             /* word holding the first bit */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* word holding the last bit */
		int bit = start & (ZEND_MM_BITSET_LEN - 1);       /* offset of first bit in its word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			if ((bitset[pos++] & tmp) != 0) {
				return 0;
			}
			while (pos != end) {
				/* check all bits of the intermediate full words */
				if (bitset[pos++] != 0) {
					return 0;
				}
			}
			/* "end" now becomes the offset of the last bit in its word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		} else {
			/* whole range lives in a single word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		}
	}
}
  627. /**********/
  628. /* Chunks */
  629. /**********/
/* mmap a chunk of "size" bytes aligned to "alignment" (a power of two).
 * Strategy: try a plain mmap first; if the kernel happens to return an
 * aligned address we are done. Otherwise over-allocate by
 * (alignment - REAL_PAGE_SIZE) and trim the misaligned head/tail.
 * Returns NULL on failure. */
static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
{
	void *ptr = zend_mm_mmap(size);

	if (ptr == NULL) {
		return NULL;
	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
		/* lucky: already aligned */
#ifdef MADV_HUGEPAGE
		if (zend_mm_use_huge_pages) {
			madvise(ptr, size, MADV_HUGEPAGE);
		}
#endif
		return ptr;
	} else {
		size_t offset;

		/* chunk has to be aligned */
		zend_mm_munmap(ptr, size);
		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
#ifdef _WIN32
		/* Windows cannot trim a mapping, so release the whole over-sized
		 * mapping and re-map at the computed aligned address; this can race
		 * with other mappings, hence the re-check below. */
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			zend_mm_munmap(ptr, size);
			return NULL;
		}
		return ptr;
#else
		/* POSIX: unmap the misaligned head, then any excess tail */
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			offset = alignment - offset;
			zend_mm_munmap(ptr, offset);
			ptr = (char*)ptr + offset;
			alignment -= offset;
		}
		if (alignment > REAL_PAGE_SIZE) {
			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
		}
# ifdef MADV_HUGEPAGE
		if (zend_mm_use_huge_pages) {
			madvise(ptr, size, MADV_HUGEPAGE);
		}
# endif
#endif
		return ptr;
	}
}
  677. static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
  678. {
  679. #if ZEND_MM_STORAGE
  680. if (UNEXPECTED(heap->storage)) {
  681. void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
  682. ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
  683. return ptr;
  684. }
  685. #endif
  686. return zend_mm_chunk_alloc_int(size, alignment);
  687. }
  688. static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
  689. {
  690. #if ZEND_MM_STORAGE
  691. if (UNEXPECTED(heap->storage)) {
  692. heap->storage->handlers.chunk_free(heap->storage, addr, size);
  693. return;
  694. }
  695. #endif
  696. zend_mm_munmap(addr, size);
  697. }
/* Shrink a chunk in place from old_size to new_size.
 * Returns 1 on success, 0 when in-place truncation is not supported
 * (Windows, or a storage backend without a truncate handler). */
static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		if (heap->storage->handlers.chunk_truncate) {
			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifndef _WIN32
	/* unmap the tail of the mapping */
	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
	return 1;
#else
	return 0;
#endif
}
/* Grow a chunk in place from old_size to new_size.
 * Returns 1 on success, 0 when in-place extension is not possible
 * (Windows, a storage backend without an extend handler, or the address
 * range right after the chunk is already taken). */
static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		if (heap->storage->handlers.chunk_extend) {
			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifndef _WIN32
	/* try to map the extra pages directly after the existing mapping */
	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
	return 0;
#endif
}
  733. static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
  734. {
  735. chunk->heap = heap;
  736. chunk->next = heap->main_chunk;
  737. chunk->prev = heap->main_chunk->prev;
  738. chunk->prev->next = chunk;
  739. chunk->next->prev = chunk;
  740. /* mark first pages as allocated */
  741. chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
  742. chunk->free_tail = ZEND_MM_FIRST_PAGE;
  743. /* the younger chunks have bigger number */
  744. chunk->num = chunk->prev->num + 1;
  745. /* mark first pages as allocated */
  746. chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
  747. chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
  748. }
  749. /***********************/
  750. /* Huge Runs (forward) */
  751. /***********************/
  752. static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
  753. static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
  754. static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
  755. #if ZEND_DEBUG
  756. static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
  757. #else
  758. static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
  759. #endif
  760. /**************/
  761. /* Large Runs */
  762. /**************/
/* Allocate a run of "pages_count" consecutive pages (a "large" run).
 * Walks the chunk list doing a best-fit scan of each chunk's free map;
 * when no chunk can satisfy the request, takes a cached chunk or maps a
 * new one (honoring the memory limit and triggering GC when needed).
 * Returns the page-aligned address of the run, or NULL on failure when
 * the heap is in "overflow allowed" mode. */
#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_chunk *chunk = heap->main_chunk;
	uint32_t page_num, len;
	int steps = 0;  /* chunks inspected so far, used for the MRU heuristic below */

	while (1) {
		if (UNEXPECTED(chunk->free_pages < pages_count)) {
			/* chunk cannot possibly fit the run */
			goto not_found;
#if 0
		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
				goto not_found;
			} else {
				page_num = chunk->free_tail;
				goto found;
			}
		} else if (0) {
			/* First-Fit Search (disabled alternative implementation) */
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					len = i - page_num;
					if (len >= pages_count) {
						goto found;
					} else if (i >= free_tail) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = (i + zend_ulong_ntz(tmp)) - page_num;
				if (len >= pages_count) {
					goto found;
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
#endif
		} else {
			/* Best-Fit Search: scan the free map for the smallest free run
			 * that still fits pages_count pages */
			int best = -1;                    /* start page of best run so far */
			uint32_t best_len = ZEND_MM_PAGES; /* its length */
			uint32_t free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			uint32_t i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						/* reached the end: fall back to best candidate */
						if (best > 0) {
							page_num = best;
							goto found;
						} else {
							goto not_found;
						}
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					if (i >= free_tail || i == ZEND_MM_PAGES) {
						/* free run extends to the end of the used area */
						len = ZEND_MM_PAGES - page_num;
						if (len >= pages_count && len < best_len) {
							chunk->free_tail = page_num + pages_count;
							goto found;
						} else {
							/* set accurate value */
							chunk->free_tail = page_num;
							if (best > 0) {
								page_num = best;
								goto found;
							} else {
								goto not_found;
							}
						}
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = i + zend_ulong_ntz(tmp) - page_num;
				if (len >= pages_count) {
					if (len == pages_count) {
						/* exact fit: cannot do better */
						goto found;
					} else if (len < best_len) {
						best_len = len;
						best = page_num;
					}
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
		}

not_found:
		if (chunk->next == heap->main_chunk) {
			/* no existing chunk had room: reuse a cached chunk or map a new one */
get_chunk:
			if (heap->cached_chunks) {
				heap->cached_chunks_count--;
				chunk = heap->cached_chunks;
				heap->cached_chunks = chunk->next;
			} else {
#if ZEND_MM_LIMIT
				if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
					if (zend_mm_gc(heap)) {
						/* GC may have released a chunk into the cache */
						goto get_chunk;
					} else if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#endif
				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
				if (UNEXPECTED(chunk == NULL)) {
					/* insufficient memory */
					if (zend_mm_gc(heap) &&
					    (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
						/* pass */
					} else {
#if !ZEND_MM_LIMIT
						zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#if ZEND_MM_STAT
				do {
					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
					size_t peak = MAX(heap->real_peak, size);
					heap->real_size = size;
					heap->real_peak = peak;
				} while (0);
#elif ZEND_MM_LIMIT
				heap->real_size += ZEND_MM_CHUNK_SIZE;
#endif
			}
			heap->chunks_count++;
			if (heap->chunks_count > heap->peak_chunks_count) {
				heap->peak_chunks_count = heap->chunks_count;
			}
			zend_mm_chunk_init(heap, chunk);
			page_num = ZEND_MM_FIRST_PAGE;
			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
			goto found;
		} else {
			chunk = chunk->next;
			steps++;
		}
	}

found:
	if (steps > 2 && pages_count < 8) {
		/* move chunk into the head of the linked-list */
		chunk->prev->next = chunk->next;
		chunk->next->prev = chunk->prev;
		chunk->next = heap->main_chunk->next;
		chunk->prev = heap->main_chunk;
		chunk->prev->next = chunk;
		chunk->next->prev = chunk;
	}
	/* mark run as allocated */
	chunk->free_pages -= pages_count;
	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
	if (page_num == chunk->free_tail) {
		chunk->free_tail = page_num + pages_count;
	}
	return ZEND_MM_PAGE_ADDR(chunk, page_num);
}
/* Allocate a "large" block: round size up to whole pages and take a run
 * of pages from a chunk; updates the size/peak statistics. */
static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif
	return ptr;
}
/* Out-of-line wrapper around zend_mm_alloc_large_ex; kept zend_never_inline
 * so the large-allocation path does not bloat the hot small-allocation
 * call sites. Both conditional branches are identical apart from the
 * ZEND_FILE_LINE_* bookkeeping compiled into the macros. */
#if ZEND_DEBUG
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#else
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#endif
/* Remove a fully free chunk from the heap's chunk list. Depending on the
 * average chunk usage (and a hysteresis counter that detects repeated
 * delete/realloc cycles at the same chunk count), the chunk is either kept
 * in the cached-chunks list for quick reuse or returned to the system. */
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->next->prev = chunk->prev;
	chunk->prev->next = chunk->next;
	heap->chunks_count--;
	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
	 || (heap->chunks_count == heap->last_chunks_delete_boundary
	  && heap->last_chunks_delete_count >= 4)) {
		/* delay deletion */
		heap->cached_chunks_count++;
		chunk->next = heap->cached_chunks;
		heap->cached_chunks = chunk;
	} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
		if (!heap->cached_chunks) {
			/* track how often we delete at this exact chunk count, to detect
			 * thrashing (alternating delete/allocate at the same boundary) */
			if (heap->chunks_count != heap->last_chunks_delete_boundary) {
				heap->last_chunks_delete_boundary = heap->chunks_count;
				heap->last_chunks_delete_count = 0;
			} else {
				heap->last_chunks_delete_count++;
			}
		}
		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
			/* free the youngest chunk (this one) */
			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
		} else {
			//TODO: select the best chunk to delete???
			/* keep this (older) chunk cached and free the cached head instead */
			chunk->next = heap->cached_chunks->next;
			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks = chunk;
		}
	}
}
/* Return a run of pages to its chunk: update counters, clear the free-map
 * bits and the run's map entry. When "free_chunk" is set and the chunk
 * becomes completely empty, the chunk itself is deleted. */
static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
{
	chunk->free_pages += pages_count;
	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = 0;
	if (chunk->free_tail == page_num + pages_count) {
		/* this setting may be not accurate */
		chunk->free_tail = page_num;
	}
	if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
		zend_mm_delete_chunk(heap, chunk);
	}
}
/* Out-of-line page-freeing entry point; always allows chunk deletion. */
static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}
/* Free a "large" run: adjust the used-size statistic, then release pages. */
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
	zend_mm_free_pages(heap, chunk, page_num, pages_count);
}
  1052. /**************/
  1053. /* Small Runs */
  1054. /**************/
/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
/* i.e. the position (1-based) of the most significant set bit of "size";
 * result is undefined for size == 0. Three implementations: compiler
 * builtin, Win32 intrinsic, and a portable shift ladder (valid for
 * 16-bit inputs, sufficient for small sizes). */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
	/* clz(x) ^ 0x1f == 31 - clz(x) for 32-bit ints */
	return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
	unsigned long index;

	if (!BitScanReverse(&index, (unsigned long)size)) {
		/* undefined behavior */
		return 64;
	}

	return (((31 - (int)index) ^ 0x1f) + 1);
#else
	int n = 16;
	if (size <= 0x00ff) {n -= 8; size = size << 8;}
	if (size <= 0x0fff) {n -= 4; size = size << 4;}
	if (size <= 0x3fff) {n -= 2; size = size << 2;}
	if (size <= 0x7fff) {n -= 1;}
	return n;
#endif
}
/* NOTE: classic function-like macros — both arguments are evaluated twice,
 * so never pass expressions with side effects. */
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
/* Map an allocation size to its small-bin index. Bins grow linearly in
 * 8-byte steps up to 64 bytes, then in power-of-two groups of four bins. */
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
	/* disabled table-driven alternative */
	int n;
	/*0, 1, 2, 3, 4, 5, 6, 7, 8, 9 10, 11, 12*/
	static const int f1[] = { 3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9};
	static const int f2[] = { 0, 0, 0, 0, 0, 0, 0, 4, 8, 12, 16, 20, 24};

	if (UNEXPECTED(size <= 2)) return 0;
	n = zend_mm_small_size_to_bit(size - 1);
	return ((size-1) >> f1[n]) + f2[n];
#else
	unsigned int t1, t2;

	if (size <= 64) {
		/* we need to support size == 0 ... */
		return (size - !!size) >> 3;
	} else {
		/* t2 = log2(rounded size group), t1 = offset within the group */
		t1 = size - 1;
		t2 = zend_mm_small_size_to_bit(t1) - 3;
		t1 = t1 >> t2;
		t2 = t2 - 3;
		t2 = t2 << 2;
		return (int)(t1 + t2);
	}
#endif
}

#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)
/* Slow path of small allocation: the bin's free list is empty, so allocate
 * a fresh page run for the bin, thread all but the first element into the
 * bin's free list, and return the first element. Returns NULL on OOM. */
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_bin *bin;
	zend_mm_free_slot *p, *end;

#if ZEND_DEBUG
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
	if (UNEXPECTED(bin == NULL)) {
		/* insufficient memory */
		return NULL;
	}

	/* mark the first page as the bin's head run and any following pages
	 * as its continuation runs */
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
	if (bin_pages[bin_num] > 1) {
		uint32_t i = 1;

		do {
			chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
			i++;
		} while (i < bin_pages[bin_num]);
	}

	/* create a linked list of elements from 1 to last */
	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
	do {
		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
		do {
			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
			dbg->size = 0;
		} while (0);
#endif
		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
	} while (p != end);

	/* terminate list using NULL */
	p->next_free_slot = NULL;
#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif
	/* return first element */
	return (char*)bin;
}
/* Fast path of small allocation: pop an element from the bin's free list;
 * fall back to zend_mm_alloc_small_slow when the list is empty. */
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + bin_data_size[bin_num];
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif

	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
		zend_mm_free_slot *p = heap->free_slot[bin_num];
		heap->free_slot[bin_num] = p->next_free_slot;
		return (void*)p;
	} else {
		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
  1175. static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
  1176. {
  1177. zend_mm_free_slot *p;
  1178. #if ZEND_MM_STAT
  1179. heap->size -= bin_data_size[bin_num];
  1180. #endif
  1181. #if ZEND_DEBUG
  1182. do {
  1183. zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
  1184. dbg->size = 0;
  1185. } while (0);
  1186. #endif
  1187. p = (zend_mm_free_slot*)ptr;
  1188. p->next_free_slot = heap->free_slot[bin_num];
  1189. heap->free_slot[bin_num] = p;
  1190. }
  1191. /********/
  1192. /* Heap */
  1193. /********/
#if ZEND_DEBUG
/* Locate the zend_mm_debug_info trailer stored at the end of a small or
 * large allocation (the last ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))
 * bytes of the block). Not valid for huge blocks (page_offset == 0). */
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		int pages_count = ZEND_MM_LRUN_PAGES(info);

		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	}
}
#endif
/* Main allocation dispatcher: route the request to the small-bin, large-run
 * or huge-block allocator depending on size. In debug builds the request is
 * enlarged to make room for the zend_mm_debug_info trailer, which is then
 * filled with the caller's file/line information. */
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
	if (UNEXPECTED(size < real_size)) {
		/* adding the debug trailer wrapped around */
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		return NULL;
	}
#endif
	if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
		ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		/* huge blocks track their own size; pass the original request */
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
/* Main free dispatcher. A pointer at a chunk boundary (page_offset == 0)
 * can only be a huge block (or NULL); otherwise the chunk's page map tells
 * whether it belongs to a small-bin run or a large run. */
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		if (ptr != NULL) {
			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
		} else /* if (info & ZEND_MM_IS_LRUN) */ {
			int pages_count = ZEND_MM_LRUN_PAGES(info);

			/* a large-run pointer must be page aligned */
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			zend_mm_free_large(heap, chunk, page_num, pages_count);
		}
	}
}
/* Return the usable size of an allocated block: the huge-block table entry,
 * the bin's element size for small runs, or the page-rounded size for
 * large runs. */
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		/* disabled: report the exact requested size from the debug trailer */
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}
/* Generic realloc fallback: allocate a new block, copy "copy_size" bytes,
 * free the old block. The peak statistics are restored afterwards so the
 * transient double residency does not inflate the reported peaks.
 * NOTE: the #if ZEND_MM_STAT blocks deliberately split a do/while(0) across
 * conditional compilation to scope orig_peak/orig_real_peak. */
static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ret;

#if ZEND_MM_STAT
	do {
		size_t orig_peak = heap->peak;
		size_t orig_real_peak = heap->real_peak;
#endif
	ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(ret, ptr, copy_size);
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
	heap->peak = MAX(orig_peak, heap->size);
	heap->real_peak = MAX(orig_real_peak, heap->real_size);
	} while (0);
#endif
	return ret;
}
/* Reallocate a huge block. Tries, in order: keep the block when the
 * page-rounded size is unchanged, truncate the mapping in place when
 * shrinking, extend the mapping in place when growing (subject to the
 * memory limit), and finally falls back to alloc+copy+free. */
static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t old_size;
	size_t new_size;
#if ZEND_DEBUG
	size_t real_size;
#endif

	old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
	real_size = size;
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
	if (size > ZEND_MM_MAX_LARGE_SIZE) {
		/* the new size is still huge: try to resize in place */
#if ZEND_DEBUG
		size = real_size;
#endif
#ifdef ZEND_WIN32
		/* On Windows we don't have ability to extend huge blocks in-place.
		 * We allocate them with 2MB size granularity, to avoid many
		 * reallocations when they are extended by small pieces
		 */
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
		if (new_size == old_size) {
			/* same mapping size: just record the new requested size */
#if ZEND_DEBUG
			zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
			zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
			return ptr;
		} else if (new_size < old_size) {
			/* unmap tail */
			if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
				heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
			if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
				if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
					/* pass */
				} else if (heap->overflow == 0) {
#if ZEND_DEBUG
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
					return NULL;
				}
			}
#endif
			/* try to map tail right after this block */
			if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
				heap->real_peak = MAX(heap->real_peak, heap->real_size);
				heap->size += new_size - old_size;
				heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		}
	}

	/* in-place resize failed (or new size is not huge): alloc+copy+free */
	return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Core of erealloc(): dispatch a reallocation request according to the
 * storage class of "ptr" (NULL / huge block / small run / large run).
 *
 * use_copy_size: when non-zero, "copy_size" caps the number of bytes copied
 * into a newly allocated block (used by _zend_mm_realloc2()); otherwise the
 * usable old/new size is preserved in full.
 * Returns the (possibly moved) block, or NULL when the memory limit path
 * declines the request. */
static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, zend_bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		/* Chunk-aligned pointers are either NULL or huge blocks. */
		if (EXPECTED(ptr == NULL)) {
			return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		/* Reserve room for the per-allocation debug record appended to the block. */
		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);

			do {
				old_size = bin_data_size[old_bin_num];

				/* Check if requested size fits into current bin */
				if (size <= old_size) {
					/* Check if truncation is necessary */
					if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
						/* truncation: move into a smaller bin */
						ret = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(size, copy_size) : size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
					} else {
						/* reallocation in-place */
						ret = ptr;
					}
				} else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
					/* small extension: allocate a larger bin, copy, free old */
#if ZEND_MM_STAT
					do {
						size_t orig_peak = heap->peak;
						size_t orig_real_peak = heap->real_peak;
#endif
					ret = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
					copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
					memcpy(ret, ptr, copy_size);
					zend_mm_free_small(heap, ptr, old_bin_num);
#if ZEND_MM_STAT
						/* Undo the transient peak bump caused by briefly
						 * holding both the old and the new block. */
						heap->peak = MAX(orig_peak, heap->size);
						heap->real_peak = MAX(orig_real_peak, heap->real_size);
					} while (0);
#endif
				} else {
					/* slow reallocation (small -> large/huge) */
					break;
				}

#if ZEND_DEBUG
				/* Refresh the debug record with the new requested size/site. */
				dbg = zend_mm_get_debug_info(heap, ret);
				dbg->size = real_size;
				dbg->filename = __zend_filename;
				dbg->orig_filename = __zend_orig_filename;
				dbg->lineno = __zend_lineno;
				dbg->orig_lineno = __zend_orig_lineno;
#endif
				return ret;
			} while (0);
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
					/* Same page count: nothing to move. */
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);
							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		/* Slow path below expects the caller-requested size, not the
		 * debug-padded one. */
		size = real_size;
#endif
	}

	copy_size = MIN(old_size, copy_size);
	return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
  1549. /*********************/
  1550. /* Huge Runs (again) */
  1551. /*********************/
  1552. #if ZEND_DEBUG
  1553. static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1554. #else
  1555. static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1556. #endif
  1557. {
  1558. zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1559. list->ptr = ptr;
  1560. list->size = size;
  1561. list->next = heap->huge_list;
  1562. #if ZEND_DEBUG
  1563. list->dbg.size = dbg_size;
  1564. list->dbg.filename = __zend_filename;
  1565. list->dbg.orig_filename = __zend_orig_filename;
  1566. list->dbg.lineno = __zend_lineno;
  1567. list->dbg.orig_lineno = __zend_orig_lineno;
  1568. #endif
  1569. heap->huge_list = list;
  1570. }
  1571. static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1572. {
  1573. zend_mm_huge_list *prev = NULL;
  1574. zend_mm_huge_list *list = heap->huge_list;
  1575. while (list != NULL) {
  1576. if (list->ptr == ptr) {
  1577. size_t size;
  1578. if (prev) {
  1579. prev->next = list->next;
  1580. } else {
  1581. heap->huge_list = list->next;
  1582. }
  1583. size = list->size;
  1584. zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1585. return size;
  1586. }
  1587. prev = list;
  1588. list = list->next;
  1589. }
  1590. ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
  1591. return 0;
  1592. }
  1593. static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1594. {
  1595. zend_mm_huge_list *list = heap->huge_list;
  1596. while (list != NULL) {
  1597. if (list->ptr == ptr) {
  1598. return list->size;
  1599. }
  1600. list = list->next;
  1601. }
  1602. ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
  1603. return 0;
  1604. }
  1605. #if ZEND_DEBUG
  1606. static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1607. #else
  1608. static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1609. #endif
  1610. {
  1611. zend_mm_huge_list *list = heap->huge_list;
  1612. while (list != NULL) {
  1613. if (list->ptr == ptr) {
  1614. list->size = size;
  1615. #if ZEND_DEBUG
  1616. list->dbg.size = dbg_size;
  1617. list->dbg.filename = __zend_filename;
  1618. list->dbg.orig_filename = __zend_orig_filename;
  1619. list->dbg.lineno = __zend_lineno;
  1620. list->dbg.orig_lineno = __zend_orig_lineno;
  1621. #endif
  1622. return;
  1623. }
  1624. list = list->next;
  1625. }
  1626. }
/* Allocate a "huge" block: one served directly by chunk mapping rather than
 * by the small/large run machinery. The mapping is registered on huge_list.
 * Returns NULL only on the soft-failure paths (limit exceeded or OOM with
 * overflow reporting disabled); zend_mm_safe_error() handles reporting. */
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#ifdef ZEND_WIN32
	/* On Windows we don't have ability to extend huge blocks in-place.
	 * We allocate them with 2MB size granularity, to avoid many
	 * reallocations when they are extended by small pieces
	 */
	size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
#else
	size_t alignment = REAL_PAGE_SIZE;
#endif
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
	void *ptr;

	/* Rounding up wrapped around size_t: refuse the request. */
	if (UNEXPECTED(new_size < size)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
	}

#if ZEND_MM_LIMIT
	if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
		/* Over the limit: try GC first, then report unless overflow mode is on. */
		if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
			/* pass */
		} else if (heap->overflow == 0) {
#if ZEND_DEBUG
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
			return NULL;
		}
	}
#endif
	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(ptr == NULL)) {
		/* insufficient memory: GC and retry once before giving up */
		if (zend_mm_gc(heap) &&
		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
			/* pass */
		} else {
#if !ZEND_MM_LIMIT
			zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
			return NULL;
		}
	}
#if ZEND_DEBUG
	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	/* Account the full mapped size against both real and logical counters. */
	do {
		size_t size = heap->real_size + new_size;
		size_t peak = MAX(heap->real_peak, size);
		heap->real_size = size;
		heap->real_peak = peak;
	} while (0);
	do {
		size_t size = heap->size + new_size;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#elif ZEND_MM_LIMIT
	heap->real_size += new_size;
#endif
	return ptr;
}
  1697. static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  1698. {
  1699. size_t size;
  1700. ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
  1701. size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  1702. zend_mm_chunk_free(heap, ptr, size);
  1703. #if ZEND_MM_STAT || ZEND_MM_LIMIT
  1704. heap->real_size -= size;
  1705. #endif
  1706. #if ZEND_MM_STAT
  1707. heap->size -= size;
  1708. #endif
  1709. }
  1710. /******************/
  1711. /* Initialization */
  1712. /******************/
/* Create a brand new heap: map the first chunk and carve the heap structure
 * itself out of that chunk's reserved header pages.
 * Returns NULL (after reporting, when ZEND_MM_ERROR is enabled) if the
 * initial chunk cannot be mapped. */
static zend_mm_heap *zend_mm_init(void)
{
	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	zend_mm_heap *heap;

	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	/* The heap lives inside its own first chunk; the chunk ring starts out
	 * circular with a single element. */
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	/* Pages [0, ZEND_MM_FIRST_PAGE) hold the chunk/heap headers: mark them
	 * allocated in the free bitmap and as one large run in the page map. */
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
	heap->last_chunks_delete_boundary = 0;
	heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	/* Default memory limit: half the size_t range, i.e. effectively unlimited. */
	heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
#if ZEND_MM_STORAGE
	heap->storage = NULL;
#endif
	heap->huge_list = NULL;
	return heap;
}
/* Garbage-collect the heap: return fully-free small-run pages and empty
 * chunks to the system. Returns the number of bytes reclaimed (whole pages
 * only). Safe to call at any time; used as a last resort before reporting
 * memory-limit / OOM errors. */
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
{
	zend_mm_free_slot *p, **q;
	zend_mm_chunk *chunk;
	size_t page_offset;
	int page_num;
	zend_mm_page_info info;
	uint32_t i, free_counter;
	int has_free_pages;
	size_t collected = 0;

#if ZEND_MM_CUSTOM
	/* A custom allocator owns the memory; nothing for us to reclaim. */
	if (heap->use_custom_heap) {
		return 0;
	}
#endif

	for (i = 0; i < ZEND_MM_BINS; i++) {
		has_free_pages = 0;
		/* Pass 1: count the cached free slots per small run by bumping the
		 * free counter stored in the run's page-map entry. */
		p = heap->free_slot[i];
		while (p != NULL) {
			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
			ZEND_ASSERT(page_offset != 0);
			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
			info = chunk->map[page_num];
			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
			/* SRUN|LRUN together marks a follow-up page of a multi-page small
			 * run; hop back to the run's first page via the stored offset. */
			if (info & ZEND_MM_IS_LRUN) {
				page_num -= ZEND_MM_NRUN_OFFSET(info);
				info = chunk->map[page_num];
				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
			}
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
			if (free_counter == bin_elements[i]) {
				/* Every element of this run sits on the free list. */
				has_free_pages = 1;
			}
			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
			p = p->next_free_slot;
		}
		if (!has_free_pages) {
			continue;
		}
		/* Pass 2: unlink the free-list entries belonging to fully-free runs
		 * (their pages are released in the chunk sweep below). */
		q = &heap->free_slot[i];
		p = *q;
		while (p != NULL) {
			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
			ZEND_ASSERT(page_offset != 0);
			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
			info = chunk->map[page_num];
			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
			if (info & ZEND_MM_IS_LRUN) {
				page_num -= ZEND_MM_NRUN_OFFSET(info);
				info = chunk->map[page_num];
				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
			}
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
				/* remove from cache */
				p = p->next_free_slot;
				*q = p;
			} else {
				q = &p->next_free_slot;
				p = *q;
			}
		}
	}

	/* Sweep all chunks: free the pages of fully-free small runs, reset the
	 * counters of partially-used ones, and delete now-empty chunks. */
	chunk = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < chunk->free_tail) {
			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
				info = chunk->map[i];
				if (info & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
					int pages_count = bin_pages[bin_num];

					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
						/* all elements are free */
						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
						collected += pages_count;
					} else {
						/* reset counter */
						chunk->map[i] = ZEND_MM_SRUN(bin_num);
					}
					i += bin_pages[bin_num];
				} else /* if (info & ZEND_MM_IS_LRUN) */ {
					i += ZEND_MM_LRUN_PAGES(info);
				}
			} else {
				i++;
			}
		}
		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
			/* Chunk holds nothing but its own header: give it back. */
			zend_mm_chunk *next_chunk = chunk->next;

			zend_mm_delete_chunk(heap, chunk);
			chunk = next_chunk;
		} else {
			chunk = chunk->next;
		}
	} while (chunk != heap->main_chunk);

	return collected * ZEND_MM_PAGE_SIZE;
}
  1870. #if ZEND_DEBUG
  1871. /******************/
  1872. /* Leak detection */
  1873. /******************/
/* Scan small run at page "i" of chunk "p", starting from element "j", for
 * further leaks allocated at the same file:line as "leak". Matching entries
 * are counted and their debug records cleared so they are reported only once.
 * If the whole run ends up clear, its pages are dropped from the free bitmap
 * so the outer sweep skips them. Returns the number of repeats found. */
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
{
	int empty = 1;
	zend_long count = 0;
	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
	/* Debug record sits at the tail of each element; address element j's. */
	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

	while (j < bin_elements[bin_num]) {
		if (dbg->size != 0) {
			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
				/* Same allocation site: count it and clear the record. */
				count++;
				dbg->size = 0;
				dbg->filename = NULL;
				dbg->lineno = 0;
			} else {
				empty = 0;
			}
		}
		j++;
		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
	}
	if (empty) {
		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
	}
	return count;
}
/* Continue the leak sweep from page "i" of chunk "p" through the rest of the
 * chunk ring, counting allocations made at the same file:line as "leak".
 * Visited large runs are removed from the free bitmap so the caller's outer
 * sweep does not report them again. Returns the number of repeats. */
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
{
	zend_long count = 0;

	do {
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					count += zend_mm_find_leaks_small(p, i, 0, leak);
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					/* Large runs keep their debug record at the very end. */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
						count++;
					}
					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
		i = ZEND_MM_FIRST_PAGE;
	} while (p != heap->main_chunk);
	return count;
}
  1927. static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
  1928. {
  1929. zend_long count = 0;
  1930. zend_mm_huge_list *prev = list;
  1931. zend_mm_huge_list *p = list->next;
  1932. while (p) {
  1933. if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
  1934. prev->next = p->next;
  1935. zend_mm_chunk_free(heap, p->ptr, p->size);
  1936. zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
  1937. count++;
  1938. } else {
  1939. prev = p;
  1940. }
  1941. p = prev->next;
  1942. }
  1943. return count;
  1944. }
/* Report every allocation still alive at shutdown as a leak. Each distinct
 * allocation site is reported once, with additional blocks from the same
 * site folded into a "repeated" count. Huge blocks are freed as they are
 * reported; small/large runs are only cleared from the bookkeeping. */
static void zend_mm_check_leaks(zend_mm_heap *heap)
{
	zend_mm_huge_list *list;
	zend_mm_chunk *p;
	zend_leak_info leak;
	zend_long repeated = 0;
	uint32_t total = 0;
	uint32_t i, j;

	/* find leaked huge blocks and free them */
	list = heap->huge_list;
	while (list) {
		zend_mm_huge_list *q = list;

		leak.addr = list->ptr;
		leak.size = list->dbg.size;
		leak.filename = list->dbg.filename;
		leak.orig_filename = list->dbg.orig_filename;
		leak.lineno = list->dbg.lineno;
		leak.orig_lineno = list->dbg.orig_lineno;

		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
		/* Fold further huge blocks from the same site into one report. */
		repeated = zend_mm_find_leaks_huge(heap, list);
		total += 1 + repeated;
		if (repeated) {
			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
		}

		heap->huge_list = list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
	}

	/* for each chunk */
	p = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					/* Per-element debug record sits at each element's tail. */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					j = 0;
					while (j < bin_elements[bin_num]) {
						/* A non-zero size marks a live (leaked) element. */
						if (dbg->size != 0) {
							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
							leak.size = dbg->size;
							leak.filename = dbg->filename;
							leak.orig_filename = dbg->orig_filename;
							leak.lineno = dbg->lineno;
							leak.orig_lineno = dbg->orig_lineno;

							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

							dbg->size = 0;
							dbg->filename = NULL;
							dbg->lineno = 0;

							/* Collect repeats from the rest of this run and
							 * from all following pages/chunks. */
							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
							total += 1 + repeated;
							if (repeated) {
								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
							}
						}
						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
						j++;
					}
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					/* Large-run debug record lives at the end of the run. */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
					leak.size = dbg->size;
					leak.filename = dbg->filename;
					leak.orig_filename = dbg->orig_filename;
					leak.lineno = dbg->lineno;
					leak.orig_lineno = dbg->orig_lineno;

					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

					zend_mm_bitset_reset_range(p->free_map, i, pages_count);

					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
					total += 1 + repeated;
					if (repeated) {
						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
					}
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
	} while (p != heap->main_chunk);
	if (total) {
		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
	}
}
  2037. #endif
  2038. void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
  2039. {
  2040. zend_mm_chunk *p;
  2041. zend_mm_huge_list *list;
  2042. #if ZEND_MM_CUSTOM
  2043. if (heap->use_custom_heap) {
  2044. if (full) {
  2045. if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
  2046. heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
  2047. } else {
  2048. heap->custom_heap.std._free(heap);
  2049. }
  2050. }
  2051. return;
  2052. }
  2053. #endif
  2054. #if ZEND_DEBUG
  2055. if (!silent) {
  2056. zend_mm_check_leaks(heap);
  2057. }
  2058. #endif
  2059. /* free huge blocks */
  2060. list = heap->huge_list;
  2061. heap->huge_list = NULL;
  2062. while (list) {
  2063. zend_mm_huge_list *q = list;
  2064. list = list->next;
  2065. zend_mm_chunk_free(heap, q->ptr, q->size);
  2066. }
  2067. /* move all chunks except of the first one into the cache */
  2068. p = heap->main_chunk->next;
  2069. while (p != heap->main_chunk) {
  2070. zend_mm_chunk *q = p->next;
  2071. p->next = heap->cached_chunks;
  2072. heap->cached_chunks = p;
  2073. p = q;
  2074. heap->chunks_count--;
  2075. heap->cached_chunks_count++;
  2076. }
  2077. if (full) {
  2078. /* free all cached chunks */
  2079. while (heap->cached_chunks) {
  2080. p = heap->cached_chunks;
  2081. heap->cached_chunks = p->next;
  2082. zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
  2083. }
  2084. /* free the first chunk */
  2085. zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
  2086. } else {
  2087. zend_mm_heap old_heap;
  2088. /* free some cached chunks to keep average count */
  2089. heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
  2090. while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
  2091. heap->cached_chunks) {
  2092. p = heap->cached_chunks;
  2093. heap->cached_chunks = p->next;
  2094. zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
  2095. heap->cached_chunks_count--;
  2096. }
  2097. /* clear cached chunks */
  2098. p = heap->cached_chunks;
  2099. while (p != NULL) {
  2100. zend_mm_chunk *q = p->next;
  2101. memset(p, 0, sizeof(zend_mm_chunk));
  2102. p->next = q;
  2103. p = q;
  2104. }
  2105. /* reinitialize the first chunk and heap */
  2106. old_heap = *heap;
  2107. p = heap->main_chunk;
  2108. memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
  2109. *heap = old_heap;
  2110. memset(heap->free_slot, 0, sizeof(heap->free_slot));
  2111. heap->main_chunk = p;
  2112. p->heap = &p->heap_slot;
  2113. p->next = p;
  2114. p->prev = p;
  2115. p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
  2116. p->free_tail = ZEND_MM_FIRST_PAGE;
  2117. p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
  2118. p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
  2119. heap->chunks_count = 1;
  2120. heap->peak_chunks_count = 1;
  2121. heap->last_chunks_delete_boundary = 0;
  2122. heap->last_chunks_delete_count = 0;
  2123. #if ZEND_MM_STAT || ZEND_MM_LIMIT
  2124. heap->real_size = ZEND_MM_CHUNK_SIZE;
  2125. #endif
  2126. #if ZEND_MM_STAT
  2127. heap->real_peak = ZEND_MM_CHUNK_SIZE;
  2128. heap->size = heap->peak = 0;
  2129. #endif
  2130. }
  2131. }
  2132. /**************/
  2133. /* PUBLIC API */
  2134. /**************/
/* Public entry point: allocate "size" bytes from "heap". Thin wrapper over
 * the inlined zend_mm_alloc_heap() dispatcher. */
ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Public entry point: free a block previously returned by this heap. */
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Public realloc: preserve the full usable content (use_copy_size = 0). */
void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Public realloc variant: caller caps how many bytes must survive the move
 * via "copy_size" (use_copy_size = 1). */
void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Public entry point: report the usable size of an allocated block. */
ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
  2155. /**********************/
  2156. /* Allocation Manager */
  2157. /**********************/
/* Allocator globals: just the active heap pointer. AG() abstracts over
 * per-thread storage (ZTS) vs. a single process-global (non-ZTS). */
typedef struct _zend_alloc_globals {
	zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
static int alloc_globals_id;
# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif
/* Report whether the built-in Zend allocator is active (as opposed to a
 * user-installed custom heap). Always true when custom heaps are compiled out. */
ZEND_API int is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
	return !AG(mm_heap)->use_custom_heap;
#else
	return 1;
#endif
}
/* Specialized fixed-size allocator entry points (non-debug builds only).
 * Each expands to a tiny function; the macros below first divert to a
 * user-installed custom heap when one is active, then fall through to the
 * fast built-in path. */
#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
#undef _emalloc

#if ZEND_MM_CUSTOM
/* Forward an allocation to the custom heap, if installed; otherwise fall
 * through to the built-in allocator. */
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
				return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			} else { \
				return AG(mm_heap)->custom_heap.std._malloc(size); \
			} \
		} \
	} while (0)
/* Forward a deallocation to the custom heap, if installed. */
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
				AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			} else { \
				AG(mm_heap)->custom_heap.std._free(ptr); \
			} \
			return; \
		} \
	} while (0)
#else
/* Custom heaps compiled out: the hooks are no-ops. */
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif
/* Generate one _emalloc_<size>() per small bin: a direct call into the
 * small-bin allocator with the bin number resolved at compile time. */
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
		return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
	}

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
/* Fast path for "large" allocations (page-granular, served from a chunk). */
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    ZEND_MM_CUSTOM_ALLOCATOR(size);
    return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Fast path for "huge" allocations, handled outside the chunk machinery. */
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{
    ZEND_MM_CUSTOM_ALLOCATOR(size);
    return zend_mm_alloc_huge(AG(mm_heap), size);
}
#if ZEND_DEBUG
/* Debug variant: before releasing a small-bin block, validate that |ptr|
 * belongs to this heap and that the page map records the expected bin. */
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
        { \
            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
            zend_mm_free_small(AG(mm_heap), ptr, _num); \
        } \
    }
#else
/* Release variant: only the cheap heap-ownership check is kept. */
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
        { \
            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
            zend_mm_free_small(AG(mm_heap), ptr, _num); \
        } \
    }
#endif

ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
/* Frees a page-granular "large" run.  |size| is trusted to match the
 * original request; it is converted back to a page count and cross-checked
 * against the chunk's page map via ZEND_ASSERT. */
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
    {
        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
        uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

        /* A large run must start on a page boundary within its own chunk. */
        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
    }
}
/* Frees a "huge" allocation.  |size| is accepted for symmetry with
 * _efree_large() but is not used here. */
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{
    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
    zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif
/* Main allocation entry point: dispatches to an installed custom heap if
 * any, otherwise to the Zend MM proper. */
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        /* Debug-mode custom heaps receive the caller's file/line info. */
        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
            return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        } else {
            return AG(mm_heap)->custom_heap.std._malloc(size);
        }
    }
#endif
    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Main deallocation entry point: mirrors the dispatch logic of _emalloc(). */
ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
            AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        } else {
            AG(mm_heap)->custom_heap.std._free(ptr);
        }
        return;
    }
#endif
    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Resize an allocation.  The Zend-MM path passes copy_size == size and
 * use_copy_size == 0; see _erealloc2() for the bounded-copy variant. */
ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
            return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        } else {
            return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
        }
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Resize an allocation, preserving at most |copy_size| bytes of the old
 * contents on the Zend-MM path (use_copy_size == 1).
 * NOTE(review): the custom-heap path delegates to the plain _realloc
 * handler and therefore ignores copy_size — confirm that is intended. */
ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
            return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        } else {
            return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
        }
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Returns the allocated size of |ptr| on the current heap, or 0 when a
 * custom heap is installed (block sizes are unknown to the Zend MM then). */
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return 0;
    }
#endif
    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Overflow-checked emalloc of nmemb * size + offset bytes;
 * zend_safe_address_guarded() bails out on arithmetic overflow. */
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return emalloc_rel(zend_safe_address_guarded(nmemb, size, offset));
}
/* Persistent (pemalloc) counterpart of _safe_emalloc(). */
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
    return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
}
/* Overflow-checked erealloc of nmemb * size + offset bytes. */
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return erealloc_rel(ptr, zend_safe_address_guarded(nmemb, size, offset));
}
/* Persistent (perealloc) counterpart of _safe_erealloc(). */
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
    return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
}
  2342. ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2343. {
  2344. void *p;
  2345. size = zend_safe_address_guarded(nmemb, size, 0);
  2346. p = emalloc_rel(size);
  2347. memset(p, 0, size);
  2348. return p;
  2349. }
  2350. ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2351. {
  2352. size_t length;
  2353. char *p;
  2354. length = strlen(s);
  2355. if (UNEXPECTED(length + 1 == 0)) {
  2356. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
  2357. }
  2358. p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2359. memcpy(p, s, length+1);
  2360. return p;
  2361. }
  2362. ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
  2363. {
  2364. char *p;
  2365. if (UNEXPECTED(length + 1 == 0)) {
  2366. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
  2367. }
  2368. p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
  2369. memcpy(p, s, length);
  2370. p[length] = 0;
  2371. return p;
  2372. }
  2373. ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
  2374. {
  2375. char *p;
  2376. if (UNEXPECTED(length + 1 == 0)) {
  2377. zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
  2378. }
  2379. p = (char *) malloc(length + 1);
  2380. if (UNEXPECTED(p == NULL)) {
  2381. return p;
  2382. }
  2383. if (EXPECTED(length)) {
  2384. memcpy(p, s, length);
  2385. }
  2386. p[length] = 0;
  2387. return p;
  2388. }
/* Sets the heap memory limit, clamped to at least one chunk.
 * Always returns SUCCESS; a no-op when limits are compiled out. */
ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
    return SUCCESS;
}
/* Current memory usage: real_size (memory obtained from the system) when
 * |real_usage| is set, otherwise the logical allocated size.  Returns 0
 * when statistics are compiled out. */
ZEND_API size_t zend_memory_usage(int real_usage)
{
#if ZEND_MM_STAT
    if (real_usage) {
        return AG(mm_heap)->real_size;
    } else {
        size_t usage = AG(mm_heap)->size;
        return usage;
    }
#endif
    return 0;
}
/* Peak counterpart of zend_memory_usage(); 0 when stats are compiled out. */
ZEND_API size_t zend_memory_peak_usage(int real_usage)
{
#if ZEND_MM_STAT
    if (real_usage) {
        return AG(mm_heap)->real_peak;
    } else {
        return AG(mm_heap)->peak;
    }
#endif
    return 0;
}
/* Shuts down the current heap; |full_shutdown| releases everything,
 * otherwise the heap is reset for reuse by the next request. */
ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
{
    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}
/* Allocator bootstrap (per thread under ZTS).  Honors two environment
 * variables: USE_ZEND_ALLOC=0 routes everything to the system allocator,
 * and USE_ZEND_ALLOC_HUGE_PAGES=1 enables huge-page backed chunks. */
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
{
    char *tmp;
#if ZEND_MM_CUSTOM
    tmp = getenv("USE_ZEND_ALLOC");
    if (tmp && !zend_atoi(tmp, 0)) {
        /* Zend MM disabled: install a minimal custom heap that forwards to
         * malloc/free/realloc.  NOTE(review): the malloc() result is not
         * checked before the memset — confirm OOM here is acceptable. */
        alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
        memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
        alloc_globals->mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
        alloc_globals->mm_heap->custom_heap.std._malloc = __zend_malloc;
        alloc_globals->mm_heap->custom_heap.std._free = free;
        alloc_globals->mm_heap->custom_heap.std._realloc = __zend_realloc;
        return;
    }
#endif
    tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
    if (tmp && zend_atoi(tmp, 0)) {
        zend_mm_use_huge_pages = 1;
    }
    ZEND_TSRMLS_CACHE_UPDATE();
    alloc_globals->mm_heap = zend_mm_init();
}
#ifdef ZTS
/* Thread teardown: full shutdown of this thread's heap (silent). */
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
    zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif
/* Process-wide allocator startup: registers/initializes the globals and
 * caches the OS page size for later use. */
ZEND_API void start_memory_manager(void)
{
#ifdef ZTS
    ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
#else
    alloc_globals_ctor(&alloc_globals);
#endif
#ifndef _WIN32
/* POSIX systems expose the page size through one of two sysconf names. */
# if defined(_SC_PAGESIZE)
    REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
# elif defined(_SC_PAGE_SIZE)
    REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
# endif
#endif
}
  2466. ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
  2467. {
  2468. zend_mm_heap *old_heap;
  2469. old_heap = AG(mm_heap);
  2470. AG(mm_heap) = (zend_mm_heap*)new_heap;
  2471. return (zend_mm_heap*)old_heap;
  2472. }
/* Returns the heap currently in use by this thread/process. */
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
{
    return AG(mm_heap);
}
/* Reports whether a custom heap is installed on the CURRENT heap.
 * NOTE(review): the |new_heap| parameter is unused — the query always goes
 * through AG(mm_heap). */
ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
{
#if ZEND_MM_CUSTOM
    return AG(mm_heap)->use_custom_heap;
#else
    return 0;
#endif
}
/* Installs custom malloc/free/realloc handlers on |heap|.  Passing all
 * three as NULL reverts the heap to the native Zend MM.  No-op when
 * custom-heap support is compiled out. */
ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t),
                                          void  (*_free)(void*),
                                          void* (*_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    if (!_malloc && !_free && !_realloc) {
        _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
    } else {
        _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
        _heap->custom_heap.std._malloc = _malloc;
        _heap->custom_heap.std._free = _free;
        _heap->custom_heap.std._realloc = _realloc;
    }
#endif
}
/* Retrieves the custom handlers installed on |heap|; all three outputs are
 * NULL when no custom heap is active (or support is compiled out).
 * NOTE(review): a ZEND_MM_CUSTOM_HEAP_DEBUG heap is also reported through
 * the std view of the handler union — confirm callers expect this. */
ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
                                          void* (**_malloc)(size_t),
                                          void  (**_free)(void*),
                                          void* (**_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    if (heap->use_custom_heap) {
        *_malloc = _heap->custom_heap.std._malloc;
        *_free = _heap->custom_heap.std._free;
        *_realloc = _heap->custom_heap.std._realloc;
    } else {
        *_malloc = NULL;
        *_free = NULL;
        *_realloc = NULL;
    }
#else
    *_malloc = NULL;
    *_free = NULL;
    *_realloc = NULL;
#endif
}
#if ZEND_DEBUG
/* Debug builds only: installs file/line-aware custom handlers. */
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
                                                void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
    _heap->custom_heap.debug._malloc = _malloc;
    _heap->custom_heap.debug._free = _free;
    _heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif
  2539. ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
  2540. {
  2541. #if ZEND_MM_STORAGE
  2542. return heap->storage;
  2543. #else
  2544. return NULL
  2545. #endif
  2546. }
/* Creates a fresh heap with default (mmap-backed) storage. */
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
    return zend_mm_init();
}
/* Creates a heap backed by caller-supplied chunk handlers, with |data_size|
 * bytes of |data| copied into the heap-owned storage record.
 *
 * Bootstrap order matters: the first chunk must be obtained through the
 * handlers before the heap exists, so a stack-temporary storage record
 * (tmp_storage) carries them until a permanent record can be allocated
 * from the new heap itself.  Returns NULL on failure, or always NULL when
 * pluggable storage is compiled out. */
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
    zend_mm_storage tmp_storage, *storage;
    zend_mm_chunk *chunk;
    zend_mm_heap *heap;

    memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
    tmp_storage.data = data;
    chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
    if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    /* The heap structure lives inside the first chunk (heap_slot). */
    heap = &chunk->heap_slot;
    chunk->heap = heap;
    chunk->next = chunk;
    chunk->prev = chunk;
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    chunk->num = 0;
    /* Reserve the first page(s) for the chunk header itself. */
    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
    heap->main_chunk = chunk;
    heap->cached_chunks = NULL;
    heap->chunks_count = 1;
    heap->peak_chunks_count = 1;
    heap->cached_chunks_count = 0;
    heap->avg_chunks_count = 1.0;
    heap->last_chunks_delete_boundary = 0;
    heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
    heap->real_peak = ZEND_MM_CHUNK_SIZE;
    heap->size = 0;
    heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
    heap->limit = (Z_L(-1) >> Z_L(1));
    heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
    heap->use_custom_heap = 0;
#endif
    /* Point at the temporary record so the allocation below can use the
     * caller's handlers; replaced with the permanent record afterwards. */
    heap->storage = &tmp_storage;
    heap->huge_list = NULL;
    memset(heap->free_slot, 0, sizeof(heap->free_slot));
    storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
    if (!storage) {
        handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
    if (data) {
        /* User data is stored inline, right after the storage record. */
        storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
        memcpy(storage->data, data, data_size);
    }
    heap->storage = storage;
    return heap;
#else
    return NULL;
#endif
}
  2628. static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
  2629. {
  2630. fprintf(stderr, "Out of memory\n");
  2631. exit(1);
  2632. }
  2633. ZEND_API void * __zend_malloc(size_t len)
  2634. {
  2635. void *tmp = malloc(len);
  2636. if (EXPECTED(tmp || !len)) {
  2637. return tmp;
  2638. }
  2639. zend_out_of_memory();
  2640. }
  2641. ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
  2642. {
  2643. void *tmp;
  2644. len = zend_safe_address_guarded(nmemb, len, 0);
  2645. tmp = __zend_malloc(len);
  2646. memset(tmp, 0, len);
  2647. return tmp;
  2648. }
  2649. ZEND_API void * __zend_realloc(void *p, size_t len)
  2650. {
  2651. p = realloc(p, len);
  2652. if (EXPECTED(p || !len)) {
  2653. return p;
  2654. }
  2655. zend_out_of_memory();
  2656. }
  2657. /*
  2658. * Local variables:
  2659. * tab-width: 4
  2660. * c-basic-offset: 4
  2661. * indent-tabs-mode: t
  2662. * End:
  2663. * vim600: sw=4 ts=4 fdm=marker
  2664. * vim<600: sw=4 ts=4
  2665. */