zend_alloc.c

/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) Zend Technologies Ltd. (http://www.zend.com)           |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@php.net>                                 |
   |          Zeev Suraski <zeev@php.net>                                 |
   |          Dmitry Stogov <dmitry@php.net>                              |
   +----------------------------------------------------------------------+
*/

/*
 * zend_alloc is designed to be a modern CPU cache friendly memory manager
 * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
 *         performed using mmap(). The result is aligned on 2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on page boundary.
 *
 * Small - less than 3/4 of page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined
 *         sizes: 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as a single page or a few consecutive
 *         pages. Allocation inside RUNs is implemented using a linked list
 *         of free elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge
 * memory blocks are always aligned on a CHUNK boundary, so it's very easy to
 * determine the CHUNK owning a certain pointer. Regular CHUNKs reserve a
 * single page at the start for special purposes. It contains a bitset of
 * free pages, a few bitsets for available runs of predefined small sizes,
 * a map of pages that keeps information about the usage of each page in
 * this CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in
 * addition it provides specialized and optimized routines to allocate blocks
 * of predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(),
 * etc.) The library uses C preprocessor tricks that substitute calls to
 * emalloc() with more specialized routines when the requested size is known.
 */
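
/*
 * Because chunks are 2M-aligned, the chunk that owns a pointer and the
 * pointer's offset inside it can be recovered with plain mask arithmetic.
 * A minimal illustrative sketch, not part of the allocator; DEMO_CHUNK_SIZE
 * and the demo address are assumptions for illustration only:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_CHUNK_SIZE (2 * 1024 * 1024)    /* 2M, must be a power of two */

int main(void)
{
    /* pretend this pointer lives somewhere inside a 2M-aligned chunk */
    uintptr_t ptr = 0x7f3a00234567;

    /* clear the low 21 bits -> start of the owning chunk */
    uintptr_t chunk = ptr & ~((uintptr_t)DEMO_CHUNK_SIZE - 1);
    /* keep the low 21 bits -> offset inside the chunk; offset 0 means "huge" */
    uintptr_t offset = ptr & ((uintptr_t)DEMO_CHUNK_SIZE - 1);

    printf("chunk=%#lx offset=%#lx page=%lu\n",
           (unsigned long)chunk, (unsigned long)offset,
           (unsigned long)(offset / 4096)); /* 4096-byte pages */
    return 0;
}
#endif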
#include "zend.h"
#include "zend_alloc.h"
#include "zend_globals.h"
#include "zend_operators.h"
#include "zend_multiply.h"
#include "zend_bitset.h"
#include <signal.h>

#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif

#ifdef ZEND_WIN32
# include <wincrypt.h>
# include <process.h>
# include "win32/winutil.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
#include <fcntl.h>
#include <errno.h>

#ifndef _WIN32
# include <sys/mman.h>
# ifndef MAP_ANON
#  ifdef MAP_ANONYMOUS
#   define MAP_ANON MAP_ANONYMOUS
#  endif
# endif
# ifndef MAP_FAILED
#  define MAP_FAILED ((void*)-1)
# endif
# ifndef MAP_POPULATE
#  define MAP_POPULATE 0
# endif
# if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
#  define REAL_PAGE_SIZE _real_page_size
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
# endif
# ifdef MAP_ALIGNED_SUPER
#  define MAP_HUGETLB MAP_ALIGNED_SUPER
# endif
#endif

#ifndef REAL_PAGE_SIZE
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
#endif

/* NetBSD has an mremap() function with a signature that is incompatible with Linux (WTF?),
 * so pretend it doesn't exist. */
#ifndef __linux__
# undef HAVE_MREMAP
#endif

#ifndef __APPLE__
# define ZEND_MM_FD -1
#else
# include <mach/vm_statistics.h>
/* macOS allows tracking anonymous pages via vmmap per TAG id;
 * user-land applications may use tags 240 through 255.
 */
# define ZEND_MM_FD VM_MAKE_TAG(250U)
#endif
#ifndef ZEND_MM_STAT
# define ZEND_MM_STAT 1    /* track current and peak memory usage */
#endif
#ifndef ZEND_MM_LIMIT
# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit */
#endif
#ifndef ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator */
                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
#endif
#ifndef ZEND_MM_STORAGE
# define ZEND_MM_STORAGE 1 /* support for custom memory storage */
#endif
#ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1   /* report system errors */
#endif

#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message) do { \
        if (UNEXPECTED(!(condition))) { \
            zend_mm_panic(message); \
        } \
    } while (0)
#endif

typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */

#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
    (((size_t)(size)) & ((alignment) - 1))
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
    (((size_t)(size)) & ~((alignment) - 1))
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
    (((size_t)(size) + ((alignment) - 1)) / (alignment))

#define ZEND_MM_BITSET_LEN   (sizeof(zend_mm_bitset) * 8)        /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */

typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];    /* 64B */

#define ZEND_MM_IS_FRUN                  0x00000000
#define ZEND_MM_IS_LRUN                  0x40000000
#define ZEND_MM_IS_SRUN                  0x80000000

#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
#define ZEND_MM_LRUN_PAGES_OFFSET        0

#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0

#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16

#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
#define ZEND_MM_NRUN_OFFSET_OFFSET       16

#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)

#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))

#define ZEND_MM_BINS 30
typedef struct _zend_mm_page      zend_mm_page;
typedef struct _zend_mm_bin       zend_mm_bin;
typedef struct _zend_mm_free_slot zend_mm_free_slot;
typedef struct _zend_mm_chunk     zend_mm_chunk;
typedef struct _zend_mm_huge_list zend_mm_huge_list;

int zend_mm_use_huge_pages = 0;

/*
 * Memory is retrieved from the OS in chunks of fixed size 2MB.
 * Inside a chunk it is managed in pages of fixed size 4096B.
 * So each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of continuous free pages at the end of chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the
 *              corresponding page is allocated. The allocator for "large
 *              sizes" may easily find a free page (or a continuous run of
 *              pages) by searching for zero bits.
 *
 * map        - contains service information for each page (32 bits for
 *              each page).
 *    usage:
 *       (2 bits)
 *       FRUN - free page,
 *       LRUN - first page of "large" allocation
 *       SRUN - first page of a bin used for "small" allocation
 *
 *    lrun_pages:
 *       (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *       (5 bits) bin number (e.g. 0 for sizes 0-8, 1 for 9-16,
 *        2 for 17-24, 3 for 25-32 etc.) see zend_alloc_sizes.h
 */
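
/*
 * How the 32-bit map[] entries pack together, shown on concrete values.
 * A minimal sketch assuming the ZEND_MM_* macros above; the assertion
 * values follow directly from the mask/offset definitions, and the demo
 * function name is an assumption:
 */
#if 0
#include <assert.h>

static void zend_mm_map_encoding_demo(void)
{
    /* a "large" run spanning 3 pages: type bit + page count in low 10 bits */
    uint32_t lrun = ZEND_MM_LRUN(3);
    assert((lrun & ZEND_MM_IS_LRUN) && ZEND_MM_LRUN_PAGES(lrun) == 3);

    /* first page of a small-size bin #5 */
    uint32_t srun = ZEND_MM_SRUN(5);
    assert((srun & ZEND_MM_IS_SRUN) && ZEND_MM_SRUN_BIN_NUM(srun) == 5);

    /* a non-first page of a multi-page bin: bin number + page offset */
    uint32_t nrun = ZEND_MM_NRUN(5, 2);
    assert(ZEND_MM_SRUN_BIN_NUM(nrun) == 5 && ZEND_MM_NRUN_OFFSET(nrun) == 2);
}
#endif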
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
    int                use_custom_heap;
#endif
#if ZEND_MM_STORAGE
    zend_mm_storage   *storage;
#endif
#if ZEND_MM_STAT
    size_t             size;                    /* current memory usage */
    size_t             peak;                    /* peak memory usage */
#endif
    zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    size_t             real_size;               /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
    size_t             real_peak;               /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
    size_t             limit;                   /* memory limit */
    int                overflow;                /* memory overflow flag */
#endif

    zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */

    zend_mm_chunk     *main_chunk;
    zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
    int                chunks_count;            /* number of allocated chunks */
    int                peak_chunks_count;       /* peak number of allocated chunks for current request */
    int                cached_chunks_count;     /* number of cached chunks */
    double             avg_chunks_count;        /* average number of chunks allocated per request */
    int                last_chunks_delete_boundary; /* number of chunks after last deletion */
    int                last_chunks_delete_count;    /* number of deletions at the last boundary */
#if ZEND_MM_CUSTOM
    union {
        struct {
            void      *(*_malloc)(size_t);
            void       (*_free)(void*);
            void      *(*_realloc)(void*, size_t);
        } std;
        struct {
            void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
            void       (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
            void      *(*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
        } debug;
    } custom_heap;
    HashTable *tracked_allocs;
#endif
};

struct _zend_mm_chunk {
    zend_mm_heap      *heap;
    zend_mm_chunk     *next;
    zend_mm_chunk     *prev;
    uint32_t           free_pages;         /* number of free pages */
    uint32_t           free_tail;          /* number of free pages at the end of chunk */
    uint32_t           num;
    char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
    zend_mm_heap       heap_slot;          /* used only in main chunk */
    zend_mm_page_map   free_map;           /* 512 bits or 64 bytes */
    zend_mm_page_info  map[ZEND_MM_PAGES]; /* 2 KB = 512 * 4 */
};

struct _zend_mm_page {
    char               bytes[ZEND_MM_PAGE_SIZE];
};

/*
 * bin - one page or a few contiguous pages (up to 8) used for allocation
 * of a particular "small size".
 */
struct _zend_mm_bin {
    char               bytes[ZEND_MM_PAGE_SIZE * 8];
};

struct _zend_mm_free_slot {
    zend_mm_free_slot *next_free_slot;
};

struct _zend_mm_huge_list {
    void              *ptr;
    size_t             size;
    zend_mm_huge_list *next;
#if ZEND_DEBUG
    zend_mm_debug_info dbg;
#endif
};

#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
    ((void*)(((zend_mm_page*)(chunk)) + (page_num)))

#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const uint32_t bin_data_size[] = {
    ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};

#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const uint32_t bin_elements[] = {
    ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};

#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const uint32_t bin_pages[] = {
    ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};
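
/*
 * The three tables above are generated from a single X-macro list in
 * zend_alloc_sizes.h. A minimal sketch of the pattern with made-up entries;
 * the real ZEND_MM_BINS_INFO list has 30 rows, these two are illustration
 * only:
 */
#if 0
#define DEMO_BINS_INFO(_, x, y) \
    _(0,  8, 512, 1, x, y)  /* bin 0:  8-byte slots, 512 per 1 page */ \
    _(1, 16, 256, 1, x, y)  /* bin 1: 16-byte slots, 256 per 1 page */

#define DEMO_SIZE(num, size, elements, pages, x, y) size,
static const uint32_t demo_bin_data_size[] = {
    DEMO_BINS_INFO(DEMO_SIZE, x, y) /* expands to: 8, 16, */
};
#endif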
#if ZEND_DEBUG
ZEND_COLD void zend_debug_alloc_output(char *format, ...)
{
    char output_buf[256];
    va_list args;

    va_start(args, format);
    /* bound the write to the buffer size to avoid overflowing output_buf */
    vsnprintf(output_buf, sizeof(output_buf), format, args);
    va_end(args);

#ifdef ZEND_WIN32
    OutputDebugString(output_buf);
#else
    fprintf(stderr, "%s", output_buf);
#endif
}
#endif

static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
    fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
    fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
    kill(getpid(), SIGSEGV);
#endif
    exit(1);
}
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
    const char *format,
    size_t limit,
#if ZEND_DEBUG
    const char *filename,
    uint32_t lineno,
#endif
    size_t size)
{
    heap->overflow = 1;
    zend_try {
        zend_error_noreturn(E_ERROR,
            format,
            limit,
#if ZEND_DEBUG
            filename,
            lineno,
#endif
            size);
    } zend_catch {
    } zend_end_try();
    heap->overflow = 0;
    zend_bailout();
    exit(1);
}

#ifdef _WIN32
void
stderr_last_error(char *msg)
{
    DWORD err = GetLastError();
    char *buf = php_win32_error_to_msg(err);

    if (!buf[0]) {
        fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
    }
    else {
        fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
    }

    php_win32_error_msg_free(buf);
}
#endif
/*****************/
/* OS Allocation */
/*****************/

#ifndef HAVE_MREMAP
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
    return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
    int flags = MAP_PRIVATE | MAP_ANON;
#if defined(MAP_EXCL)
    flags |= MAP_FIXED | MAP_EXCL;
#endif
    /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
    void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);

    if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR && !defined(MAP_EXCL)
        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
        return NULL;
    } else if (ptr != addr) {
        if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
            fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
        }
        return NULL;
    }
    return ptr;
#endif
}
#endif

static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
    void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

    if (ptr == NULL) {
#if ZEND_MM_ERROR
        stderr_last_error("VirtualAlloc() failed");
#endif
        return NULL;
    }
    return ptr;
#else
    void *ptr;

#ifdef MAP_HUGETLB
    if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
        if (ptr != MAP_FAILED) {
            return ptr;
        }
    }
#endif

    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);

    if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
        return NULL;
    }
    return ptr;
#endif
}

static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
        stderr_last_error("VirtualFree() failed");
#endif
    }
#else
    if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
        fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
    }
#endif
}
/***********/
/* Bitmask */
/***********/

/* number of trailing set (1) bits */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
    return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
    return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
    unsigned long index;

#if defined(_WIN64)
    if (!BitScanForward64(&index, ~bitset)) {
#else
    if (!BitScanForward(&index, ~bitset)) {
#endif
        /* undefined behavior */
        return 32;
    }

    return (int)index;
#else
    int n;

    if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

    n = 0;
#if SIZEOF_ZEND_LONG == 8
    if (sizeof(zend_mm_bitset) == 8) {
        if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
    }
#endif
    if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
    if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
    if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
    if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
    return n + (bitset & 1);
#endif
}
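
/*
 * Worked example: zend_mm_bitset_nts() returns the number of trailing set
 * bits, i.e. the index of the first free (0) page in a free_map word.
 * A sketch assuming asserts are enabled; the demo name is an assumption:
 */
#if 0
static void zend_mm_nts_demo(void)
{
    /* 0x17 = 0b10111: pages 0..2 allocated, page 3 free -> 3 trailing ones */
    assert(zend_mm_bitset_nts(0x17) == 3);
    /* bit 0 already clear -> first free page is page 0 */
    assert(zend_mm_bitset_nts(0x00) == 0);
}
#endif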
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
    return ZEND_BIT_TEST(bitset, bit);
}

static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
{
    bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
{
    bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
    if (len == 1) {
        zend_mm_bitset_set_bit(bitset, start);
    } else {
        int pos = start / ZEND_MM_BITSET_LEN;
        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
        int bit = start & (ZEND_MM_BITSET_LEN - 1);
        zend_mm_bitset tmp;

        if (pos != end) {
            /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
            tmp = (zend_mm_bitset)-1 << bit;
            bitset[pos++] |= tmp;
            while (pos != end) {
                /* set all bits */
                bitset[pos++] = (zend_mm_bitset)-1;
            }
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* set bits from "0" to "end" */
            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] |= tmp;
        } else {
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* set bits from "bit" to "end" */
            tmp = (zend_mm_bitset)-1 << bit;
            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] |= tmp;
        }
    }
}

static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
    if (len == 1) {
        zend_mm_bitset_reset_bit(bitset, start);
    } else {
        int pos = start / ZEND_MM_BITSET_LEN;
        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
        int bit = start & (ZEND_MM_BITSET_LEN - 1);
        zend_mm_bitset tmp;

        if (pos != end) {
            /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
            tmp = ~((Z_UL(1) << bit) - 1);
            bitset[pos++] &= ~tmp;
            while (pos != end) {
                /* reset all bits */
                bitset[pos++] = 0;
            }
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* reset bits from "0" to "end" */
            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] &= ~tmp;
        } else {
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* reset bits from "bit" to "end" */
            tmp = (zend_mm_bitset)-1 << bit;
            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] &= ~tmp;
        }
    }
}

static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
    if (len == 1) {
        return !zend_mm_bitset_is_set(bitset, start);
    } else {
        int pos = start / ZEND_MM_BITSET_LEN;
        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
        int bit = start & (ZEND_MM_BITSET_LEN - 1);
        zend_mm_bitset tmp;

        if (pos != end) {
            /* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
            tmp = (zend_mm_bitset)-1 << bit;
            if ((bitset[pos++] & tmp) != 0) {
                return 0;
            }
            while (pos != end) {
                /* check all bits */
                if (bitset[pos++] != 0) {
                    return 0;
                }
            }
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "0" to "end" */
            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            return (bitset[pos] & tmp) == 0;
        } else {
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "bit" to "end" */
            tmp = (zend_mm_bitset)-1 << bit;
            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            return (bitset[pos] & tmp) == 0;
        }
    }
}
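
/*
 * Usage sketch for the range helpers: mark a 3-page run as allocated, then
 * verify the neighbouring pages are still free. Illustrative only; the demo
 * function name is an assumption:
 */
#if 0
static void zend_mm_bitset_demo(void)
{
    zend_mm_bitset map[ZEND_MM_PAGE_MAP_LEN] = {0};

    zend_mm_bitset_set_range(map, 10, 3);             /* pages 10..12 -> allocated */
    assert(!zend_mm_bitset_is_free_range(map, 10, 3));
    assert(zend_mm_bitset_is_free_range(map, 13, 4)); /* pages 13..16 still free */
    zend_mm_bitset_reset_range(map, 10, 3);           /* free them again */
    assert(zend_mm_bitset_is_free_range(map, 0, 64));
}
#endif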
/**********/
/* Chunks */
/**********/

static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
{
    void *ptr = zend_mm_mmap(size);

    if (ptr == NULL) {
        return NULL;
    } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
#ifdef MADV_HUGEPAGE
        if (zend_mm_use_huge_pages) {
            madvise(ptr, size, MADV_HUGEPAGE);
        }
#endif
        return ptr;
    } else {
        size_t offset;

        /* chunk has to be aligned */
        zend_mm_munmap(ptr, size);
        ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
        if (ptr == NULL) {
            /* the oversized retry mapping failed as well */
            return NULL;
        }
#ifdef _WIN32
        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
        zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
        ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
        if (offset != 0) {
            zend_mm_munmap(ptr, size);
            return NULL;
        }
        return ptr;
#else
        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
        if (offset != 0) {
            offset = alignment - offset;
            zend_mm_munmap(ptr, offset);
            ptr = (char*)ptr + offset;
            alignment -= offset;
        }
        if (alignment > REAL_PAGE_SIZE) {
            zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
        }
# ifdef MADV_HUGEPAGE
        if (zend_mm_use_huge_pages) {
            madvise(ptr, size, MADV_HUGEPAGE);
        }
# endif
#endif
        return ptr;
    }
}
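
/*
 * The over-allocate-and-trim arithmetic above, on concrete numbers. A sketch
 * with assumed values (2M alignment, 4K pages, made-up address):
 *
 *   request: size = 2M, alignment = 2M, REAL_PAGE_SIZE = 4K
 *   mmap(2M + 2M - 4K) returns e.g. ptr = 0x7f0000101000 (not 2M-aligned)
 *   offset = ptr & (2M - 1)     = 0x101000
 *   trim   = alignment - offset = 0x0ff000 -> munmap the front, ptr += trim
 *   ptr is now 0x7f0000200000, exactly 2M-aligned; the remaining slack at
 *   the end (alignment - trim - 4K = 0x100000) is unmapped as well, leaving
 *   a single aligned 2M block.
 */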
static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
        ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & ~(alignment-1)) == (zend_uintptr_t)ptr);
        return ptr;
    }
#endif
    return zend_mm_chunk_alloc_int(size, alignment);
}

static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        heap->storage->handlers.chunk_free(heap->storage, addr, size);
        return;
    }
#endif
    zend_mm_munmap(addr, size);
}

static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        if (heap->storage->handlers.chunk_truncate) {
            return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
        } else {
            return 0;
        }
    }
#endif
#ifndef _WIN32
    zend_mm_munmap((char*)addr + new_size, old_size - new_size);
    return 1;
#else
    return 0;
#endif
}

static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        if (heap->storage->handlers.chunk_extend) {
            return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
        } else {
            return 0;
        }
    }
#endif
#ifdef HAVE_MREMAP
    /* We don't use MREMAP_MAYMOVE due to alignment requirements. */
    void *ptr = mremap(addr, old_size, new_size, 0);
    if (ptr == MAP_FAILED) {
        return 0;
    }
    /* Sanity check: The mapping shouldn't have moved. */
    ZEND_ASSERT(ptr == addr);
    return 1;
#elif !defined(_WIN32)
    return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
    return 0;
#endif
}

static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
    chunk->heap = heap;
    chunk->next = heap->main_chunk;
    chunk->prev = heap->main_chunk->prev;
    chunk->prev->next = chunk;
    chunk->next->prev = chunk;
    /* mark the reserved first page as allocated */
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    /* younger chunks get bigger numbers */
    chunk->num = chunk->prev->num + 1;
    /* mark the first page(s) as allocated in the free map as well */
    chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}
/***********************/
/* Huge Runs (forward) */
/***********************/

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#endif

/**************/
/* Large Runs */
/**************/

#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
    zend_mm_chunk *chunk = heap->main_chunk;
    uint32_t page_num, len;
    int steps = 0;

    while (1) {
        if (UNEXPECTED(chunk->free_pages < pages_count)) {
            goto not_found;
#if 0
        } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
            if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
                goto not_found;
            } else {
                page_num = chunk->free_tail;
                goto found;
            }
        } else if (0) {
            /* First-Fit Search */
            int free_tail = chunk->free_tail;
            zend_mm_bitset *bitset = chunk->free_map;
            zend_mm_bitset tmp = *(bitset++);
            int i = 0;

            while (1) {
                /* skip allocated blocks */
                while (tmp == (zend_mm_bitset)-1) {
                    i += ZEND_MM_BITSET_LEN;
                    if (i == ZEND_MM_PAGES) {
                        goto not_found;
                    }
                    tmp = *(bitset++);
                }
                /* find first 0 bit */
                page_num = i + zend_mm_bitset_nts(tmp);
                /* reset bits from 0 to "bit" */
                tmp &= tmp + 1;
                /* skip free blocks */
                while (tmp == 0) {
                    i += ZEND_MM_BITSET_LEN;
                    len = i - page_num;
                    if (len >= pages_count) {
                        goto found;
                    } else if (i >= free_tail) {
                        goto not_found;
                    }
                    tmp = *(bitset++);
                }
                /* find first 1 bit */
                len = (i + zend_ulong_ntz(tmp)) - page_num;
                if (len >= pages_count) {
                    goto found;
                }
                /* set bits from 0 to "bit" */
                tmp |= tmp - 1;
            }
#endif
        } else {
            /* Best-Fit Search */
            int best = -1;
            uint32_t best_len = ZEND_MM_PAGES;
            uint32_t free_tail = chunk->free_tail;
            zend_mm_bitset *bitset = chunk->free_map;
            zend_mm_bitset tmp = *(bitset++);
            uint32_t i = 0;

            while (1) {
                /* skip allocated blocks */
                while (tmp == (zend_mm_bitset)-1) {
                    i += ZEND_MM_BITSET_LEN;
                    if (i == ZEND_MM_PAGES) {
                        if (best > 0) {
                            page_num = best;
                            goto found;
                        } else {
                            goto not_found;
                        }
                    }
                    tmp = *(bitset++);
                }
                /* find first 0 bit */
                page_num = i + zend_mm_bitset_nts(tmp);
                /* reset bits from 0 to "bit" */
                tmp &= tmp + 1;
                /* skip free blocks */
                while (tmp == 0) {
                    i += ZEND_MM_BITSET_LEN;
                    if (i >= free_tail || i == ZEND_MM_PAGES) {
                        len = ZEND_MM_PAGES - page_num;
                        if (len >= pages_count && len < best_len) {
                            chunk->free_tail = page_num + pages_count;
                            goto found;
                        } else {
                            /* set accurate value */
                            chunk->free_tail = page_num;
                            if (best > 0) {
                                page_num = best;
                                goto found;
                            } else {
                                goto not_found;
                            }
                        }
                    }
                    tmp = *(bitset++);
                }
                /* find first 1 bit */
                len = i + zend_ulong_ntz(tmp) - page_num;
                if (len >= pages_count) {
                    if (len == pages_count) {
                        goto found;
                    } else if (len < best_len) {
                        best_len = len;
                        best = page_num;
                    }
                }
                /* set bits from 0 to "bit" */
                tmp |= tmp - 1;
            }
        }

not_found:
        if (chunk->next == heap->main_chunk) {
get_chunk:
            if (heap->cached_chunks) {
                heap->cached_chunks_count--;
                chunk = heap->cached_chunks;
                heap->cached_chunks = chunk->next;
            } else {
#if ZEND_MM_LIMIT
                if (UNEXPECTED(ZEND_MM_CHUNK_SIZE > heap->limit - heap->real_size)) {
                    if (zend_mm_gc(heap)) {
                        goto get_chunk;
                    } else if (heap->overflow == 0) {
#if ZEND_DEBUG
                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
                        return NULL;
                    }
                }
#endif
                chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
                if (UNEXPECTED(chunk == NULL)) {
                    /* insufficient memory */
                    if (zend_mm_gc(heap) &&
                        (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
                        /* pass */
                    } else {
#if !ZEND_MM_LIMIT
                        zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
                        zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
                        zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
                        return NULL;
                    }
                }
#if ZEND_MM_STAT
                do {
                    size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
                    size_t peak = MAX(heap->real_peak, size);
                    heap->real_size = size;
                    heap->real_peak = peak;
                } while (0);
#elif ZEND_MM_LIMIT
                heap->real_size += ZEND_MM_CHUNK_SIZE;
#endif
            }
            heap->chunks_count++;
            if (heap->chunks_count > heap->peak_chunks_count) {
                heap->peak_chunks_count = heap->chunks_count;
            }
            zend_mm_chunk_init(heap, chunk);
            page_num = ZEND_MM_FIRST_PAGE;
            len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
            goto found;
        } else {
            chunk = chunk->next;
            steps++;
        }
    }

found:
    if (steps > 2 && pages_count < 8) {
        /* move chunk into the head of the linked-list */
        chunk->prev->next = chunk->next;
        chunk->next->prev = chunk->prev;
        chunk->next = heap->main_chunk->next;
        chunk->prev = heap->main_chunk;
        chunk->prev->next = chunk;
        chunk->next->prev = chunk;
    }
    /* mark run as allocated */
    chunk->free_pages -= pages_count;
    zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
    chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
    if (page_num == chunk->free_tail) {
        chunk->free_tail = page_num + pages_count;
    }
    return ZEND_MM_PAGE_ADDR(chunk, page_num);
}
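
/*
 * The two bit tricks the best-fit scan above relies on, shown standalone.
 * A sketch; the demo name is an assumption:
 */
#if 0
static void zend_mm_scan_tricks_demo(void)
{
    /* "tmp &= tmp + 1" clears the run of trailing 1s:        */
    /*   0b0110111 + 1 = 0b0111000; AND -> 0b0110000          */
    assert((0x37 & (0x37 + 1)) == 0x30);

    /* "tmp |= tmp - 1" fills the run of trailing 0s with 1s: */
    /*   0b0110000 - 1 = 0b0101111; OR  -> 0b0111111          */
    assert((0x30 | (0x30 - 1)) == 0x3f);
}
#endif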
static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
    void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
    void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
    do {
        size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
        size_t peak = MAX(heap->peak, size);
        heap->size = size;
        heap->peak = peak;
    } while (0);
#endif
    return ptr;
}

#if ZEND_DEBUG
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#else
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#endif

static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
    chunk->next->prev = chunk->prev;
    chunk->prev->next = chunk->next;
    heap->chunks_count--;
    if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
     || (heap->chunks_count == heap->last_chunks_delete_boundary
      && heap->last_chunks_delete_count >= 4)) {
        /* delay deletion */
        heap->cached_chunks_count++;
        chunk->next = heap->cached_chunks;
        heap->cached_chunks = chunk;
    } else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
        heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
        if (!heap->cached_chunks) {
            if (heap->chunks_count != heap->last_chunks_delete_boundary) {
                heap->last_chunks_delete_boundary = heap->chunks_count;
                heap->last_chunks_delete_count = 0;
            } else {
                heap->last_chunks_delete_count++;
            }
        }
        if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
            zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
        } else {
            //TODO: select the best chunk to delete???
            chunk->next = heap->cached_chunks->next;
            zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
            heap->cached_chunks = chunk;
        }
    }
}

static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
{
    chunk->free_pages += pages_count;
    zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
    chunk->map[page_num] = 0;
    if (chunk->free_tail == page_num + pages_count) {
        /* this setting may be not accurate */
        chunk->free_tail = page_num;
    }
    if (free_chunk && chunk != heap->main_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
        zend_mm_delete_chunk(heap, chunk);
    }
}

static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
    zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}

static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
    heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
    zend_mm_free_pages(heap, chunk, page_num, pages_count);
}

/**************/
/* Small Runs */
/**************/

/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
    return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
    unsigned long index;

    if (!BitScanReverse(&index, (unsigned long)size)) {
        /* undefined behavior */
        return 64;
    }

    return (((31 - (int)index) ^ 0x1f) + 1);
#else
    int n = 16;
    if (size <= 0x00ff) {n -= 8; size = size << 8;}
    if (size <= 0x0fff) {n -= 4; size = size << 4;}
    if (size <= 0x3fff) {n -= 2; size = size << 2;}
    if (size <= 0x7fff) {n -= 1;}
    return n;
#endif
}
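
/*
 * Worked example: zend_mm_small_size_to_bit() returns the 1-based index of
 * the highest set bit. For size = 100 (0b1100100) the highest set bit is
 * bit 6 (0-based), so the result is 7: __builtin_clz(100) on a 32-bit int
 * is 25, and (25 ^ 0x1f) + 1 == 6 + 1 == 7.
 */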
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
    int n;
                            /* 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12 */
    static const int f1[] = {  3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
    static const int f2[] = {  0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};

    if (UNEXPECTED(size <= 2)) return 0;
    n = zend_mm_small_size_to_bit(size - 1);
    return ((size-1) >> f1[n]) + f2[n];
#else
    unsigned int t1, t2;

    if (size <= 64) {
        /* we need to support size == 0 ... */
        return (size - !!size) >> 3;
    } else {
        t1 = size - 1;
        t2 = zend_mm_small_size_to_bit(t1) - 3;
        t1 = t1 >> t2;
        t2 = t2 - 3;
        t2 = t2 << 2;
        return (int)(t1 + t2);
    }
#endif
}

#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)
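
/*
 * Worked examples for the fast path above (bins are 8, 16, ..., 64, then
 * grow by powers of two with 4 sub-steps each):
 *
 *   size = 24:  <= 64, so (24 - 1) >> 3 = 2       -> bin 2 (24-byte slots)
 *   size = 100: t1 = 99, highest bit = 7, t2 = 4; t1 >> 4 = 6;
 *               t2 = (4 - 3) << 2 = 4             -> bin 10 (112-byte slots)
 */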
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_chunk *chunk;
    int page_num;
    zend_mm_bin *bin;
    zend_mm_free_slot *p, *end;

#if ZEND_DEBUG
    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
    if (UNEXPECTED(bin == NULL)) {
        /* insufficient memory */
        return NULL;
    }

    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
    page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
    chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
    if (bin_pages[bin_num] > 1) {
        uint32_t i = 1;

        do {
            chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
            i++;
        } while (i < bin_pages[bin_num]);
    }

    /* create a linked list of elements from 1 to last */
    end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
    heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
    do {
        p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
        do {
            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
            dbg->size = 0;
        } while (0);
#endif
        p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
    } while (p != end);

    /* terminate list using NULL */
    p->next_free_slot = NULL;
#if ZEND_DEBUG
    do {
        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
        dbg->size = 0;
    } while (0);
#endif

    /* return first element */
    return bin;
}

static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
    do {
        size_t size = heap->size + bin_data_size[bin_num];
        size_t peak = MAX(heap->peak, size);
        heap->size = size;
        heap->peak = peak;
    } while (0);
#endif

    if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
        zend_mm_free_slot *p = heap->free_slot[bin_num];
        heap->free_slot[bin_num] = p->next_free_slot;
        return p;
    } else {
        return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }
}

static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
    zend_mm_free_slot *p;

#if ZEND_MM_STAT
    heap->size -= bin_data_size[bin_num];
#endif

#if ZEND_DEBUG
    do {
        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
        dbg->size = 0;
    } while (0);
#endif

    p = (zend_mm_free_slot*)ptr;
    p->next_free_slot = heap->free_slot[bin_num];
    heap->free_slot[bin_num] = p;
}
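
/*
 * The small-size fast path is a plain LIFO free list: alloc pops the head,
 * free pushes the block back, and the link lives inside the free block
 * itself. A self-contained sketch of the same idea; the demo names are
 * assumptions, not allocator API:
 */
#if 0
#include <assert.h>
#include <stddef.h>

typedef struct demo_slot { struct demo_slot *next; } demo_slot;

static void *demo_pop(demo_slot **head)
{
    demo_slot *p = *head;
    if (p) *head = p->next;   /* O(1): unlink the first free block */
    return p;
}

static void demo_push(demo_slot **head, void *ptr)
{
    demo_slot *p = (demo_slot*)ptr; /* reuse the freed block as the link */
    p->next = *head;
    *head = p;
}

int main(void)
{
    demo_slot slot_a, slot_b;
    demo_slot *head = NULL;

    demo_push(&head, &slot_a);
    demo_push(&head, &slot_b);
    assert(demo_pop(&head) == (void*)&slot_b); /* LIFO order */
    assert(demo_pop(&head) == (void*)&slot_a);
    assert(demo_pop(&head) == NULL);
    return 0;
}
#endif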

/********/
/* Heap */
/********/

#if ZEND_DEBUG
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		int pages_count = ZEND_MM_LRUN_PAGES(info);
		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	}
}
#endif

static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
	if (UNEXPECTED(size < real_size)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		return NULL;
	}
#endif
	if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
		ptr = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}

static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		if (ptr != NULL) {
			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
		} else /* if (info & ZEND_MM_IS_LRUN) */ {
			int pages_count = ZEND_MM_LRUN_PAGES(info);

			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			zend_mm_free_large(heap, chunk, page_num, pages_count);
		}
	}
}
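
/* Editorial note: pointers are classified by address arithmetic alone.
 * Chunks are ZEND_MM_CHUNK_SIZE-aligned, so ZEND_MM_ALIGNED_OFFSET(ptr,
 * ZEND_MM_CHUNK_SIZE) is the byte offset of ptr inside its chunk. Huge
 * blocks are themselves chunk-aligned, hence page_offset == 0 identifies
 * them with no lookup at all. Illustration, assuming 2 MB chunks and 4 KB
 * pages:
 *
 *   ptr = chunk_base + 0x1020  ->  page_offset = 0x1020, page_num = 1
 *   ptr = chunk_base           ->  page_offset = 0       ->  huge block
 */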

static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}

static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ret;

#if ZEND_MM_STAT
	do {
		size_t orig_peak = heap->peak;
#endif
		ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		memcpy(ret, ptr, copy_size);
		zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
		heap->peak = MAX(orig_peak, heap->size);
	} while (0);
#endif
	return ret;
}
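
/* Editorial note: the "do {" and "} while (0)" halves above deliberately sit
 * in two separate #if ZEND_MM_STAT sections. In stats-enabled builds they
 * form one scope that keeps orig_peak alive across the alloc/copy/free
 * sequence; in other builds the same three statements compile with no extra
 * scope at all.
 */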

static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t old_size;
	size_t new_size;
#if ZEND_DEBUG
	size_t real_size;
#endif

	old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
	real_size = size;
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
	if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
		size = real_size;
#endif
#ifdef ZEND_WIN32
		/* On Windows we don't have the ability to extend huge blocks in-place.
		 * We allocate them with 2MB size granularity, to avoid many
		 * reallocations when they are extended by small pieces
		 */
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
		if (new_size == old_size) {
#if ZEND_DEBUG
			zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
			zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
			return ptr;
		} else if (new_size < old_size) {
			/* unmap tail */
			if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
				heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
			if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
				if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
					/* pass */
				} else if (heap->overflow == 0) {
#if ZEND_DEBUG
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
					return NULL;
				}
			}
#endif
			/* try to map tail right after this block */
			if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
				heap->real_peak = MAX(heap->real_peak, heap->real_size);
				heap->size += new_size - old_size;
				heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		}
	}

	return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
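
/* Editorial summary: a huge block is kept in place whenever the rounded size
 * (page granularity, chunk granularity on Windows) is unchanged, or the
 * mapping can be truncated or extended in place. Only when all of that fails
 * does the code fall back to zend_mm_realloc_slow(), i.e. allocate + memcpy
 * + free.
 */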

static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		if (EXPECTED(ptr == NULL)) {
			return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);

			do {
				old_size = bin_data_size[old_bin_num];

				/* Check if requested size fits into current bin */
				if (size <= old_size) {
					/* Check if truncation is necessary */
					if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
						/* truncation */
						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(size, copy_size) : size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
					} else {
						/* reallocation in-place */
						ret = ptr;
					}
				} else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
					/* small extension */
#if ZEND_MM_STAT
					do {
						size_t orig_peak = heap->peak;
#endif
						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
#if ZEND_MM_STAT
						heap->peak = MAX(orig_peak, heap->size);
					} while (0);
#endif
				} else {
					/* slow reallocation */
					break;
				}

#if ZEND_DEBUG
				dbg = zend_mm_get_debug_info(heap, ret);
				dbg->size = real_size;
				dbg->filename = __zend_filename;
				dbg->orig_filename = __zend_orig_filename;
				dbg->lineno = __zend_lineno;
				dbg->orig_lineno = __zend_orig_lineno;
#endif
				return ret;
			} while (0);
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);
							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		size = real_size;
#endif
	}

	copy_size = MIN(old_size, copy_size);
	return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
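
/* Editorial decision tree for zend_mm_realloc_heap():
 *
 *   huge (page_offset == 0)  -> zend_mm_realloc_huge()
 *   small run                -> fits same bin: in place; smaller bin: copy
 *                               into a new slot; beyond small sizes: slow path
 *   large run                -> same page count: in place; shrink: release
 *                               tail pages; grow: claim adjacent free pages
 *                               if the bitset allows, else slow path
 */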

/*********************/
/* Huge Runs (again) */
/*********************/

#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);

	list->ptr = ptr;
	list->size = size;
	list->next = heap->huge_list;
#if ZEND_DEBUG
	list->dbg.size = dbg_size;
	list->dbg.filename = __zend_filename;
	list->dbg.orig_filename = __zend_orig_filename;
	list->dbg.lineno = __zend_lineno;
	list->dbg.orig_lineno = __zend_orig_lineno;
#endif
	heap->huge_list = list;
}

static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_huge_list *prev = NULL;
	zend_mm_huge_list *list = heap->huge_list;

	while (list != NULL) {
		if (list->ptr == ptr) {
			size_t size;

			if (prev) {
				prev->next = list->next;
			} else {
				heap->huge_list = list->next;
			}
			size = list->size;
			zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
			return size;
		}
		prev = list;
		list = list->next;
	}
	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
	return 0;
}

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_huge_list *list = heap->huge_list;

	while (list != NULL) {
		if (list->ptr == ptr) {
			return list->size;
		}
		list = list->next;
	}
	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
	return 0;
}

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_huge_list *list = heap->huge_list;

	while (list != NULL) {
		if (list->ptr == ptr) {
			list->size = size;
#if ZEND_DEBUG
			list->dbg.size = dbg_size;
			list->dbg.filename = __zend_filename;
			list->dbg.orig_filename = __zend_orig_filename;
			list->dbg.lineno = __zend_lineno;
			list->dbg.orig_lineno = __zend_orig_lineno;
#endif
			return;
		}
		list = list->next;
	}
}

static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#ifdef ZEND_WIN32
	/* On Windows we don't have the ability to extend huge blocks in-place.
	 * We allocate them with 2MB size granularity, to avoid many
	 * reallocations when they are extended by small pieces
	 */
	size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
#else
	size_t alignment = REAL_PAGE_SIZE;
#endif
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
	void *ptr;

	if (UNEXPECTED(new_size < size)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
	}

#if ZEND_MM_LIMIT
	if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
		if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
			/* pass */
		} else if (heap->overflow == 0) {
#if ZEND_DEBUG
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
			return NULL;
		}
	}
#endif
	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(ptr == NULL)) {
		/* insufficient memory */
		if (zend_mm_gc(heap) &&
		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
			/* pass */
		} else {
#if !ZEND_MM_LIMIT
			zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
			return NULL;
		}
	}
#if ZEND_DEBUG
	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->real_size + new_size;
		size_t peak = MAX(heap->real_peak, size);
		heap->real_size = size;
		heap->real_peak = peak;
	} while (0);
	do {
		size_t size = heap->size + new_size;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#elif ZEND_MM_LIMIT
	heap->real_size += new_size;
#endif
	return ptr;
}
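
/* Editorial note: huge allocations are rounded up to the platform alignment
 * computed above, checked against the memory limit with a single
 * zend_mm_gc() retry, mapped chunk-aligned, and then recorded in
 * heap->huge_list, which is the only metadata kept for them (hence the
 * linear list walks in the size/del/change helpers above).
 */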

static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t size;

	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size -= size;
#endif
#if ZEND_MM_STAT
	heap->size -= size;
#endif
}

/******************/
/* Initialization */
/******************/

static zend_mm_heap *zend_mm_init(void)
{
	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	zend_mm_heap *heap;

	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
	heap->last_chunks_delete_boundary = 0;
	heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
#if ZEND_MM_STORAGE
	heap->storage = NULL;
#endif
	heap->huge_list = NULL;
	return heap;
}

ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
{
	zend_mm_free_slot *p, **q;
	zend_mm_chunk *chunk;
	size_t page_offset;
	int page_num;
	zend_mm_page_info info;
	uint32_t i, free_counter;
	int has_free_pages;
	size_t collected = 0;

#if ZEND_MM_CUSTOM
	if (heap->use_custom_heap) {
		return 0;
	}
#endif

	for (i = 0; i < ZEND_MM_BINS; i++) {
		has_free_pages = 0;
		p = heap->free_slot[i];
		while (p != NULL) {
			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
			ZEND_ASSERT(page_offset != 0);
			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
			info = chunk->map[page_num];
			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
			if (info & ZEND_MM_IS_LRUN) {
				page_num -= ZEND_MM_NRUN_OFFSET(info);
				info = chunk->map[page_num];
				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
			}
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
			if (free_counter == bin_elements[i]) {
				has_free_pages = 1;
			}
			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
			p = p->next_free_slot;
		}

		if (!has_free_pages) {
			continue;
		}

		q = &heap->free_slot[i];
		p = *q;
		while (p != NULL) {
			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
			ZEND_ASSERT(page_offset != 0);
			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
			info = chunk->map[page_num];
			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
			if (info & ZEND_MM_IS_LRUN) {
				page_num -= ZEND_MM_NRUN_OFFSET(info);
				info = chunk->map[page_num];
				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
			}
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
				/* remove from cache */
				p = p->next_free_slot;
				*q = p;
			} else {
				q = &p->next_free_slot;
				p = *q;
			}
		}
	}

	chunk = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < chunk->free_tail) {
			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
				info = chunk->map[i];
				if (info & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
					int pages_count = bin_pages[bin_num];

					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
						/* all elements are free */
						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
						collected += pages_count;
					} else {
						/* reset counter */
						chunk->map[i] = ZEND_MM_SRUN(bin_num);
					}
					i += bin_pages[bin_num];
				} else /* if (info & ZEND_MM_IS_LRUN) */ {
					i += ZEND_MM_LRUN_PAGES(info);
				}
			} else {
				i++;
			}
		}
		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
			zend_mm_chunk *next_chunk = chunk->next;

			zend_mm_delete_chunk(heap, chunk);
			chunk = next_chunk;
		} else {
			chunk = chunk->next;
		}
	} while (chunk != heap->main_chunk);
	return collected * ZEND_MM_PAGE_SIZE;
}
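
/* Editorial note: the collector works in two passes per bin. The first pass
 * walks the free list and counts free slots per run in the page map; a run
 * whose counter reaches bin_elements[i] holds no live data. The second pass
 * unlinks the slots of such runs from the free list, after which the chunk
 * scan returns their pages (and entirely empty chunks) to the OS. The
 * return value, collected * ZEND_MM_PAGE_SIZE, is the number of bytes freed.
 */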

#if ZEND_DEBUG
/******************/
/* Leak detection */
/******************/

static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
{
	int empty = 1;
	zend_long count = 0;
	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

	while (j < bin_elements[bin_num]) {
		if (dbg->size != 0) {
			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
				count++;
				dbg->size = 0;
				dbg->filename = NULL;
				dbg->lineno = 0;
			} else {
				empty = 0;
			}
		}
		j++;
		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
	}
	if (empty) {
		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
	}
	return count;
}

static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
{
	zend_long count = 0;

	do {
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					count += zend_mm_find_leaks_small(p, i, 0, leak);
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
						count++;
					}
					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
		i = ZEND_MM_FIRST_PAGE;
	} while (p != heap->main_chunk);
	return count;
}

static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
{
	zend_long count = 0;
	zend_mm_huge_list *prev = list;
	zend_mm_huge_list *p = list->next;

	while (p) {
		if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
			prev->next = p->next;
			zend_mm_chunk_free(heap, p->ptr, p->size);
			zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
			count++;
		} else {
			prev = p;
		}
		p = prev->next;
	}

	return count;
}

static void zend_mm_check_leaks(zend_mm_heap *heap)
{
	zend_mm_huge_list *list;
	zend_mm_chunk *p;
	zend_leak_info leak;
	zend_long repeated = 0;
	uint32_t total = 0;
	uint32_t i, j;

	/* find leaked huge blocks and free them */
	list = heap->huge_list;
	while (list) {
		zend_mm_huge_list *q = list;

		leak.addr = list->ptr;
		leak.size = list->dbg.size;
		leak.filename = list->dbg.filename;
		leak.orig_filename = list->dbg.orig_filename;
		leak.lineno = list->dbg.lineno;
		leak.orig_lineno = list->dbg.orig_lineno;

		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
		repeated = zend_mm_find_leaks_huge(heap, list);
		total += 1 + repeated;
		if (repeated) {
			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
		}

		heap->huge_list = list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
	}

	/* for each chunk */
	p = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					j = 0;
					while (j < bin_elements[bin_num]) {
						if (dbg->size != 0) {
							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
							leak.size = dbg->size;
							leak.filename = dbg->filename;
							leak.orig_filename = dbg->orig_filename;
							leak.lineno = dbg->lineno;
							leak.orig_lineno = dbg->orig_lineno;

							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

							dbg->size = 0;
							dbg->filename = NULL;
							dbg->lineno = 0;

							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
							total += 1 + repeated;
							if (repeated) {
								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
							}
						}
						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
						j++;
					}
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
					leak.size = dbg->size;
					leak.filename = dbg->filename;
					leak.orig_filename = dbg->orig_filename;
					leak.lineno = dbg->lineno;
					leak.orig_lineno = dbg->orig_lineno;

					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

					zend_mm_bitset_reset_range(p->free_map, i, pages_count);

					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
					total += 1 + repeated;
					if (repeated) {
						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
					}
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
	} while (p != heap->main_chunk);
	if (total) {
		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
	}
}
#endif

#if ZEND_MM_CUSTOM
static void *tracked_malloc(size_t size);
static void tracked_free_all(void);
#endif

void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
{
	zend_mm_chunk *p;
	zend_mm_huge_list *list;

#if ZEND_MM_CUSTOM
	if (heap->use_custom_heap) {
		if (heap->custom_heap.std._malloc == tracked_malloc) {
			if (silent) {
				tracked_free_all();
			}
			zend_hash_clean(heap->tracked_allocs);
			if (full) {
				zend_hash_destroy(heap->tracked_allocs);
				free(heap->tracked_allocs);
				/* Make sure the heap free below does not use tracked_free(). */
				heap->custom_heap.std._free = free;
			}
			heap->size = 0;
		}

		if (full) {
			if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
				heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
			} else {
				heap->custom_heap.std._free(heap);
			}
		}
		return;
	}
#endif

#if ZEND_DEBUG
	if (!silent) {
		zend_mm_check_leaks(heap);
	}
#endif

	/* free huge blocks */
	list = heap->huge_list;
	heap->huge_list = NULL;
	while (list) {
		zend_mm_huge_list *q = list;
		list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
	}

	/* move all chunks except the first one into the cache */
	p = heap->main_chunk->next;
	while (p != heap->main_chunk) {
		zend_mm_chunk *q = p->next;
		p->next = heap->cached_chunks;
		heap->cached_chunks = p;
		p = q;
		heap->chunks_count--;
		heap->cached_chunks_count++;
	}

	if (full) {
		/* free all cached chunks */
		while (heap->cached_chunks) {
			p = heap->cached_chunks;
			heap->cached_chunks = p->next;
			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
		}
		/* free the first chunk */
		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
	} else {
		/* free some cached chunks to keep average count */
		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
		       heap->cached_chunks) {
			p = heap->cached_chunks;
			heap->cached_chunks = p->next;
			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks_count--;
		}
		/* clear cached chunks */
		p = heap->cached_chunks;
		while (p != NULL) {
			zend_mm_chunk *q = p->next;
			memset(p, 0, sizeof(zend_mm_chunk));
			p->next = q;
			p = q;
		}

		/* reinitialize the first chunk and heap */
		p = heap->main_chunk;
		p->heap = &p->heap_slot;
		p->next = p;
		p->prev = p;
		p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
		p->free_tail = ZEND_MM_FIRST_PAGE;
		p->num = 0;

#if ZEND_MM_STAT
		heap->size = heap->peak = 0;
#endif
		memset(heap->free_slot, 0, sizeof(heap->free_slot));
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
		heap->real_peak = (heap->cached_chunks_count + 1) * ZEND_MM_CHUNK_SIZE;
#endif
		heap->chunks_count = 1;
		heap->peak_chunks_count = 1;
		heap->last_chunks_delete_boundary = 0;
		heap->last_chunks_delete_count = 0;

		memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
		p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
		p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	}
}
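
/* Editorial note on the non-full shutdown path: the chunk cache tracks an
 * exponential moving average of the peak chunk count,
 *
 *   avg = (avg + peak) / 2.0
 *
 * e.g. avg 1.0 and a request peak of 4 chunks give avg 2.5, and cached
 * chunks are unmapped until cached + 0.9 <= 2.5, i.e. one chunk stays
 * cached. Survivors are zeroed and reused by the next request.
 */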

/**************/
/* PUBLIC API */
/**************/

ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

/**********************/
/* Allocation Manager */
/**********************/

typedef struct _zend_alloc_globals {
	zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
static int alloc_globals_id;
static size_t alloc_globals_offset;
# define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif

ZEND_API bool is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
	return !AG(mm_heap)->use_custom_heap;
#else
	return 1;
#endif
}

ZEND_API bool is_zend_ptr(const void *ptr)
{
#if ZEND_MM_CUSTOM
	if (AG(mm_heap)->use_custom_heap) {
		return 0;
	}
#endif

	if (AG(mm_heap)->main_chunk) {
		zend_mm_chunk *chunk = AG(mm_heap)->main_chunk;

		do {
			if (ptr >= (void*)chunk
			 && ptr < (void*)((char*)chunk + ZEND_MM_CHUNK_SIZE)) {
				return 1;
			}
			chunk = chunk->next;
		} while (chunk != AG(mm_heap)->main_chunk);
	}

	if (AG(mm_heap)->huge_list) {
		zend_mm_huge_list *block = AG(mm_heap)->huge_list;

		do {
			if (ptr >= (void*)block
			 && ptr < (void*)((char*)block + block->size)) {
				return 1;
			}
			block = block->next;
		} while (block != AG(mm_heap)->huge_list);
	}
	return 0;
}

#if ZEND_MM_CUSTOM
static ZEND_COLD void* ZEND_FASTCALL _malloc_custom(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
		return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		return AG(mm_heap)->custom_heap.std._malloc(size);
	}
}

static ZEND_COLD void ZEND_FASTCALL _efree_custom(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
		AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		AG(mm_heap)->custom_heap.std._free(ptr);
	}
}

static ZEND_COLD void* ZEND_FASTCALL _realloc_custom(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
		return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
	}
}
#endif

#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
#undef _emalloc

#if ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
		} \
	} while (0)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			_efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			return; \
		} \
	} while (0)
#else
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif

# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
		return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
	}

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)

ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{
	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_huge(AG(mm_heap), size);
}

#if ZEND_DEBUG
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}
#else
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}
#endif

ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)

ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	{
		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
	}
}

ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{
	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif

ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
#endif
	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		_efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		return;
	}
#endif
	zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
#endif
	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
#endif
	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return 0;
	}
#endif
	return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
	return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
}

ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
	return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
}

ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *p;

	size = zend_safe_address_guarded(nmemb, size, 0);
	p = _emalloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memset(p, 0, size);
	return p;
}

ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t length;
	char *p;

	length = strlen(s);
	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
	}
	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(p, s, length+1);
	return p;
}

ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	char *p;

	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
	}
	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(p, s, length);
	p[length] = 0;
	return p;
}

ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
{
	char *p;

	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
	}
	p = (char *) malloc(length + 1);
	if (UNEXPECTED(p == NULL)) {
		return p;
	}
	if (EXPECTED(length)) {
		memcpy(p, s, length);
	}
	p[length] = 0;
	return p;
}

ZEND_API zend_result zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
	zend_mm_heap *heap = AG(mm_heap);

	if (UNEXPECTED(memory_limit < heap->real_size)) {
		if (memory_limit >= heap->real_size - heap->cached_chunks_count * ZEND_MM_CHUNK_SIZE) {
			/* free some cached chunks to fit into new memory limit */
			do {
				zend_mm_chunk *p = heap->cached_chunks;
				heap->cached_chunks = p->next;
				zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
				heap->cached_chunks_count--;
				heap->real_size -= ZEND_MM_CHUNK_SIZE;
			} while (memory_limit < heap->real_size);
			return SUCCESS;
		}
		return FAILURE;
	}
	AG(mm_heap)->limit = memory_limit;
#endif
	return SUCCESS;
}

ZEND_API bool zend_alloc_in_memory_limit_error_reporting(void)
{
#if ZEND_MM_LIMIT
	return AG(mm_heap)->overflow;
#else
	return false;
#endif
}

ZEND_API size_t zend_memory_usage(bool real_usage)
{
#if ZEND_MM_STAT
	if (real_usage) {
		return AG(mm_heap)->real_size;
	} else {
		size_t usage = AG(mm_heap)->size;
		return usage;
	}
#endif
	return 0;
}

ZEND_API size_t zend_memory_peak_usage(bool real_usage)
{
#if ZEND_MM_STAT
	if (real_usage) {
		return AG(mm_heap)->real_peak;
	} else {
		return AG(mm_heap)->peak;
	}
#endif
	return 0;
}

ZEND_API void shutdown_memory_manager(bool silent, bool full_shutdown)
{
	zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}

#if ZEND_MM_CUSTOM
static zend_always_inline void tracked_add(zend_mm_heap *heap, void *ptr, size_t size) {
	zval size_zv;
	zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
	ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
	ZVAL_LONG(&size_zv, size);
	zend_hash_index_add_new(heap->tracked_allocs, h, &size_zv);
}
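
/* Editorial note: tracked pointers are keyed by the address shifted right by
 * ZEND_MM_ALIGNMENT_LOG2. Every allocation is at least ZEND_MM_ALIGNMENT-
 * aligned, so the shift is lossless (the assert above checks the round
 * trip): with 8-byte alignment, ptr 0x1000 maps to key 0x200, and
 * key << 3 == 0x1000 lets tracked_free_all() reconstruct the pointer from
 * the hash key alone.
 */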

static zend_always_inline zval *tracked_get_size_zv(zend_mm_heap *heap, void *ptr) {
	zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
	zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
	ZEND_ASSERT(size_zv && "Trying to free pointer not allocated through ZendMM");
	return size_zv;
}

static zend_always_inline void tracked_check_limit(zend_mm_heap *heap, size_t add_size) {
	if (add_size > heap->limit - heap->size && !heap->overflow) {
#if ZEND_DEBUG
		zend_mm_safe_error(heap,
			"Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)",
			heap->limit, "file", 0, add_size);
#else
		zend_mm_safe_error(heap,
			"Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)",
			heap->limit, add_size);
#endif
	}
}

static void *tracked_malloc(size_t size)
{
	zend_mm_heap *heap = AG(mm_heap);
	tracked_check_limit(heap, size);

	void *ptr = __zend_malloc(size);
	tracked_add(heap, ptr, size);
	heap->size += size;
	return ptr;
}

static void tracked_free(void *ptr) {
	if (!ptr) {
		return;
	}

	zend_mm_heap *heap = AG(mm_heap);
	zval *size_zv = tracked_get_size_zv(heap, ptr);
	heap->size -= Z_LVAL_P(size_zv);
	zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) size_zv);
	free(ptr);
}

static void *tracked_realloc(void *ptr, size_t new_size) {
	zend_mm_heap *heap = AG(mm_heap);
	zval *old_size_zv = NULL;
	size_t old_size = 0;
	if (ptr) {
		old_size_zv = tracked_get_size_zv(heap, ptr);
		old_size = Z_LVAL_P(old_size_zv);
	}

	if (new_size > old_size) {
		tracked_check_limit(heap, new_size - old_size);
	}

	/* Delete information about old allocation only after checking the memory limit. */
	if (old_size_zv) {
		zend_hash_del_bucket(heap->tracked_allocs, (Bucket *) old_size_zv);
	}

	ptr = __zend_realloc(ptr, new_size);
	tracked_add(heap, ptr, new_size);
	heap->size += new_size - old_size;
	return ptr;
}

static void tracked_free_all() {
	HashTable *tracked_allocs = AG(mm_heap)->tracked_allocs;
	zend_ulong h;
	ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
		void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
		free(ptr);
	} ZEND_HASH_FOREACH_END();
}
#endif

static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
{
	char *tmp;

#if ZEND_MM_CUSTOM
	tmp = getenv("USE_ZEND_ALLOC");
	if (tmp && !ZEND_ATOL(tmp)) {
		bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && ZEND_ATOL(tmp);
		zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
		memset(mm_heap, 0, sizeof(zend_mm_heap));
		mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
		mm_heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
		mm_heap->overflow = 0;

		if (!tracked) {
			/* Use system allocator. */
			mm_heap->custom_heap.std._malloc = __zend_malloc;
			mm_heap->custom_heap.std._free = free;
			mm_heap->custom_heap.std._realloc = __zend_realloc;
		} else {
			/* Use system allocator and track allocations for auto-free. */
			mm_heap->custom_heap.std._malloc = tracked_malloc;
			mm_heap->custom_heap.std._free = tracked_free;
			mm_heap->custom_heap.std._realloc = tracked_realloc;
			mm_heap->tracked_allocs = malloc(sizeof(HashTable));
			zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
		}
		return;
	}
#endif

	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
	if (tmp && ZEND_ATOL(tmp)) {
		zend_mm_use_huge_pages = 1;
	}
	alloc_globals->mm_heap = zend_mm_init();
}
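
/* Editorial usage note (the environment variables are read above; the
 * typical use cases are this editor's summary): USE_ZEND_ALLOC=0 replaces
 * ZendMM with the system allocator, which is commonly combined with external
 * memory debuggers; USE_TRACKED_ALLOC=1 additionally keeps per-pointer size
 * tracking so the memory limit and free-on-shutdown still work;
 * USE_ZEND_ALLOC_HUGE_PAGES=1 enables zend_mm_use_huge_pages. Example
 * invocation:
 *
 *   USE_ZEND_ALLOC=0 USE_TRACKED_ALLOC=1 php script.php
 */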
  2569. #ifdef ZTS
  2570. static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
  2571. {
  2572. zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
  2573. }
  2574. #endif
  2575. ZEND_API void start_memory_manager(void)
  2576. {
  2577. #ifdef ZTS
  2578. ts_allocate_fast_id(&alloc_globals_id, &alloc_globals_offset, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
  2579. #else
  2580. alloc_globals_ctor(&alloc_globals);
  2581. #endif
  2582. #ifndef _WIN32
  2583. # if defined(_SC_PAGESIZE)
  2584. REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
  2585. # elif defined(_SC_PAGE_SIZE)
  2586. REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
  2587. # endif
  2588. #endif
  2589. }
  2590. ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
  2591. {
  2592. zend_mm_heap *old_heap;
  2593. old_heap = AG(mm_heap);
  2594. AG(mm_heap) = (zend_mm_heap*)new_heap;
  2595. return (zend_mm_heap*)old_heap;
  2596. }
  2597. ZEND_API zend_mm_heap *zend_mm_get_heap(void)
  2598. {
  2599. return AG(mm_heap);
  2600. }
  2601. ZEND_API bool zend_mm_is_custom_heap(zend_mm_heap *new_heap)
  2602. {
  2603. #if ZEND_MM_CUSTOM
  2604. return AG(mm_heap)->use_custom_heap;
  2605. #else
  2606. return 0;
  2607. #endif
  2608. }
ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t),
                                          void  (*_free)(void*),
                                          void* (*_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
	zend_mm_heap *_heap = (zend_mm_heap*)heap;

	if (!_malloc && !_free && !_realloc) {
		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
	} else {
		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
		_heap->custom_heap.std._malloc = _malloc;
		_heap->custom_heap.std._free = _free;
		_heap->custom_heap.std._realloc = _realloc;
	}
#endif
}

ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
                                          void* (**_malloc)(size_t),
                                          void  (**_free)(void*),
                                          void* (**_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
	zend_mm_heap *_heap = (zend_mm_heap*)heap;

	if (heap->use_custom_heap) {
		*_malloc = _heap->custom_heap.std._malloc;
		*_free = _heap->custom_heap.std._free;
		*_realloc = _heap->custom_heap.std._realloc;
	} else {
		*_malloc = NULL;
		*_free = NULL;
		*_realloc = NULL;
	}
#else
	*_malloc = NULL;
	*_free = NULL;
	*_realloc = NULL;
#endif
}

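/* Debug builds use a separate handler table (custom_heap.debug) whose callbacks
 * also receive caller file/line information via the ZEND_FILE_LINE_DC and
 * ZEND_FILE_LINE_ORIG_DC parameter macros. */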
#if ZEND_DEBUG
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
                                                void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
	zend_mm_heap *_heap = (zend_mm_heap*)heap;

	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
	_heap->custom_heap.debug._malloc = _malloc;
	_heap->custom_heap.debug._free = _free;
	_heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif

ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
{
#if ZEND_MM_STORAGE
	return heap->storage;
#else
	return NULL;
#endif
}

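/* Create a heap backed by caller-provided chunk handlers. The storage
 * descriptor is bootstrapped in two steps: a stack-local copy (tmp_storage) is
 * used while the first chunk and the heap living inside it are set up, then a
 * permanent copy (plus data_size bytes of user data) is allocated from the new
 * heap itself and installed as heap->storage. */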
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
	return zend_mm_init();
}

ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
	zend_mm_storage tmp_storage, *storage;
	zend_mm_chunk *chunk;
	zend_mm_heap *heap;

	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
	tmp_storage.data = data;
	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
	heap->last_chunks_delete_boundary = 0;
	heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	heap->limit = (Z_L(-1) >> Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = 0;
#endif
	heap->storage = &tmp_storage;
	heap->huge_list = NULL;
	memset(heap->free_slot, 0, sizeof(heap->free_slot));
	storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
	if (!storage) {
		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
	if (data) {
		storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
		memcpy(storage->data, data, data_size);
	}
	heap->storage = storage;
	return heap;
#else
	return NULL;
#endif
}

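/* Thin wrappers over libc malloc/calloc/realloc that treat allocation failure
 * as fatal: a NULL return for a non-zero size aborts the process. __zend_calloc
 * additionally guards the nmemb * len multiplication against overflow via
 * zend_safe_address_guarded(). */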
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
{
	fprintf(stderr, "Out of memory\n");
	exit(1);
}

ZEND_API void * __zend_malloc(size_t len)
{
	void *tmp = malloc(len);

	if (EXPECTED(tmp || !len)) {
		return tmp;
	}
	zend_out_of_memory();
}

ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
{
	void *tmp;

	len = zend_safe_address_guarded(nmemb, len, 0);
	tmp = __zend_malloc(len);
	memset(tmp, 0, len);
	return tmp;
}

ZEND_API void * __zend_realloc(void *p, size_t len)
{
	p = realloc(p, len);
	if (EXPECTED(p || !len)) {
		return p;
	}
	zend_out_of_memory();
}

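/* In ZTS builds, expose the size of the allocator globals, presumably so the
 * thread-safety layer can size its per-thread storage. */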
#ifdef ZTS
size_t zend_mm_globals_size(void)
{
	return sizeof(zend_alloc_globals);
}
#endif