- /* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 1996-2019 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Wolfram Gloger <wg@malloc.de>
- and Doug Lea <dl@cs.oswego.edu>, 2001.
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of the
- License, or (at your option) any later version.
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; see the file COPYING.LIB. If
- not, see <http://www.gnu.org/licenses/>. */
- /*
- This is a version (aka ptmalloc2) of malloc/free/realloc written by
- Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
- There have been substantial changes made after the integration into
- glibc in all parts of the code. Do not look for much commonality
- with the ptmalloc2 version.
- * Version ptmalloc2-20011215
- based on:
- VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
- * Quickstart
- In order to compile this implementation, a Makefile is provided with
- the ptmalloc2 distribution, which has pre-defined targets for some
- popular systems (e.g. "make posix" for Posix threads). All that is
- typically required with regard to compiler flags is the selection of
- the thread package via defining one out of USE_PTHREADS, USE_THR or
- USE_SPROC. Check the thread-m.h file for what effects this has.
- Many/most systems will additionally require USE_TSD_DATA_HACK to be
- defined, so this is the default for "make posix".
- * Why use this malloc?
- This is not the fastest, most space-conserving, most portable, or
- most tunable malloc ever written. However it is among the fastest
- while also being among the most space-conserving, portable and tunable.
- Consistent balance across these factors results in a good general-purpose
- allocator for malloc-intensive programs.
- The main properties of the algorithms are:
- * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
- with ties normally decided via FIFO (i.e. least recently used).
- * For small (<= 64 bytes by default) requests, it is a caching
- allocator, that maintains pools of quickly recycled chunks.
- * In between, and for combinations of large and small requests, it does
- the best it can trying to meet both goals at once.
- * For very large requests (>= 128KB by default), it relies on system
- memory mapping facilities, if supported.
- For a longer but slightly out of date high-level description, see
- http://gee.cs.oswego.edu/dl/html/malloc.html
- You may already by default be using a C library containing a malloc
- that is based on some version of this malloc (for example in
- linux). You might still want to use the one in this file in order to
- customize settings or to avoid overheads associated with library
- versions.
- * Contents, described in more detail in "description of public routines" below.
- Standard (ANSI/SVID/...) functions:
- malloc(size_t n);
- calloc(size_t n_elements, size_t element_size);
- free(void* p);
- realloc(void* p, size_t n);
- memalign(size_t alignment, size_t n);
- valloc(size_t n);
- mallinfo()
- mallopt(int parameter_number, int parameter_value)
- Additional functions:
- independent_calloc(size_t n_elements, size_t size, void* chunks[]);
- independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
- pvalloc(size_t n);
- malloc_trim(size_t pad);
- malloc_usable_size(void* p);
- malloc_stats();
- * Vital statistics:
- Supported pointer representation: 4 or 8 bytes
- Supported size_t representation: 4 or 8 bytes
- Note that size_t is allowed to be 4 bytes even if pointers are 8.
- You can adjust this by defining INTERNAL_SIZE_T
- Alignment: 2 * sizeof(size_t) (default)
- (i.e., 8 byte alignment with 4byte size_t). This suffices for
- nearly all current machines and C compilers. However, you can
- define MALLOC_ALIGNMENT to be wider than this if necessary.
- Minimum overhead per allocated chunk: 4 or 8 bytes
- Each malloced chunk has a hidden word of overhead holding size
- and status information.
- Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
- 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
- When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
- ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
- needed; 4 (8) for a trailing size field and 8 (16) bytes for
- free list pointers. Thus, the minimum allocatable size is
- 16/24/32 bytes.
- Even a request for zero bytes (i.e., malloc(0)) returns a
- pointer to something of the minimum allocatable size.
- The maximum overhead wastage (i.e., number of extra bytes
- allocated than were requested in malloc) is less than or equal
- to the minimum size, except for requests >= mmap_threshold that
- are serviced via mmap(), where the worst case wastage is 2 *
- sizeof(size_t) bytes plus the remainder from a system page (the
- minimal mmap unit); typically 4096 or 8192 bytes.
- Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
- 8-byte size_t: 2^64 minus about two pages
- It is assumed that (possibly signed) size_t values suffice to
- represent chunk sizes. `Possibly signed' is due to the fact
- that `size_t' may be defined on a system as either a signed or
- an unsigned type. The ISO C standard says that it must be
- unsigned, but a few systems are known not to adhere to this.
- Additionally, even when size_t is unsigned, sbrk (which is by
- default used to obtain memory from system) accepts signed
- arguments, and may not be able to handle size_t-wide arguments
- with negative sign bit. Generally, values that would
- appear as negative after accounting for overhead and alignment
- are supported only via mmap(), which does not have this
- limitation.
- Requests for sizes outside the allowed range will perform an optional
- failure action and then return null. (Requests may also
- fail because a system is out of memory.)
- Thread-safety: thread-safe
- Compliance: I believe it is compliant with the 1997 Single Unix Specification
- Also SVID/XPG, ANSI C, and probably others as well.
- * Synopsis of compile-time options:
- People have reported using previous versions of this malloc on all
- versions of Unix, sometimes by tweaking some of the defines
- below. It has been tested most extensively on Solaris and Linux.
- People also report using it in stand-alone embedded systems.
- The implementation is in straight, hand-tuned ANSI C. It is not
- at all modular. (Sorry!) It uses a lot of macros. To be at all
- usable, this code should be compiled using an optimizing compiler
- (for example gcc -O3) that can simplify expressions and control
- paths. (FAQ: some macros import variables as arguments rather than
- declare locals because people reported that some debuggers
- otherwise get confused.)
- OPTION DEFAULT VALUE
- Compilation Environment options:
- HAVE_MREMAP 0
- Changing default word sizes:
- INTERNAL_SIZE_T size_t
- Configuration and functionality options:
- USE_PUBLIC_MALLOC_WRAPPERS NOT defined
- USE_MALLOC_LOCK NOT defined
- MALLOC_DEBUG NOT defined
- REALLOC_ZERO_BYTES_FREES 1
- TRIM_FASTBINS 0
- Options for customizing MORECORE:
- MORECORE sbrk
- MORECORE_FAILURE -1
- MORECORE_CONTIGUOUS 1
- MORECORE_CANNOT_TRIM NOT defined
- MORECORE_CLEARS 1
- MMAP_AS_MORECORE_SIZE (1024 * 1024)
- Tuning options that are also dynamically changeable via mallopt:
- DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
- DEFAULT_TRIM_THRESHOLD 128 * 1024
- DEFAULT_TOP_PAD 0
- DEFAULT_MMAP_THRESHOLD 128 * 1024
- DEFAULT_MMAP_MAX 65536
- There are several other #defined constants and macros that you
- probably don't want to touch unless you are extending or adapting malloc. */
- /*
- void* is the pointer type that malloc should say it returns
- */
- #ifndef void
- #define void void
- #endif /*void*/
- #include <stddef.h> /* for size_t */
- #include <stdlib.h> /* for getenv(), abort() */
- #include <unistd.h> /* for __libc_enable_secure */
- #include <atomic.h>
- #include <_itoa.h>
- #include <bits/wordsize.h>
- #include <sys/sysinfo.h>
- #include <ldsodefs.h>
- #include <unistd.h>
- #include <stdio.h> /* needed for malloc_stats */
- #include <errno.h>
- #include <assert.h>
- #include <shlib-compat.h>
- /* For uintptr_t. */
- #include <stdint.h>
- /* For va_arg, va_start, va_end. */
- #include <stdarg.h>
- /* For MIN, MAX, powerof2. */
- #include <sys/param.h>
- /* For ALIGN_UP et. al. */
- #include <libc-pointer-arith.h>
- /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
- #include <libc-diag.h>
- #include <malloc/malloc-internal.h>
- /* For SINGLE_THREAD_P. */
- #include <sysdep-cancel.h>
- /*
- Debugging:
- Because freed chunks may be overwritten with bookkeeping fields, this
- malloc will often die when freed memory is overwritten by user
- programs. This can be very effective (albeit in an annoying way)
- in helping track down dangling pointers.
- If you compile with -DMALLOC_DEBUG, a number of assertion checks are
- enabled that will catch more memory errors. You probably won't be
- able to make much sense of the actual assertion errors, but they
- should help you locate incorrectly overwritten memory. The checking
- is fairly extensive, and will slow down execution
- noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
- will attempt to check every non-mmapped allocated and free chunk in
- the course of computing the summaries. (By nature, mmapped regions
- cannot be checked very much automatically.)
- Setting MALLOC_DEBUG may also be helpful if you are trying to modify
- this code. The assertions in the check routines spell out in more
- detail the assumptions and invariants underlying the algorithms.
- Setting MALLOC_DEBUG does NOT provide an automated mechanism for
- checking that all accesses to malloced memory stay within their
- bounds. However, there are several add-ons and adaptations of this
- or other mallocs available that do this.
- */
- #ifndef MALLOC_DEBUG
- #define MALLOC_DEBUG 0
- #endif
- #ifndef NDEBUG
- # define __assert_fail(assertion, file, line, function) \
- __malloc_assert(assertion, file, line, function)
- extern const char *__progname;
- static void
- __malloc_assert (const char *assertion, const char *file, unsigned int line,
- const char *function)
- {
- (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
- __progname, __progname[0] ? ": " : "",
- file, line,
- function ? function : "", function ? ": " : "",
- assertion);
- fflush (stderr);
- abort ();
- }
- #endif
- #if USE_TCACHE
- /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
- # define TCACHE_MAX_BINS 64
- # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
- /* Only used to pre-fill the tunables. */
- # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
- /* When "x" is from chunksize(). */
- # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
- /* When "x" is a user-provided size. */
- # define usize2tidx(x) csize2tidx (request2size (x))
- /* With rounding and alignment, the bins are...
- idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
- idx 1 bytes 25..40 or 13..20
- idx 2 bytes 41..56 or 21..28
- etc. */
- /* This is another arbitrary limit, which tunables can change. Each
- tcache bin will hold at most this number of chunks. */
- # define TCACHE_FILL_COUNT 7
- #endif
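- /* Illustrative sketch (not part of the original source): how a request
-    size maps to a tcache bin index via the macros above, assuming the
-    usual 64-bit constants (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16,
-    MINSIZE == 32) and a rough model of request2size.  Copied out as a
-    stand-alone program:
-      #include <stdio.h>
-      #define EX_SIZE_SZ          8
-      #define EX_MALLOC_ALIGNMENT 16
-      #define EX_MINSIZE          32
-      // Rough model of request2size: add the size word, round up to the
-      // alignment, never go below the minimum chunk size.
-      static size_t ex_request2size (size_t req)
-      {
-        size_t sz = (req + EX_SIZE_SZ + EX_MALLOC_ALIGNMENT - 1)
-                    & ~(size_t) (EX_MALLOC_ALIGNMENT - 1);
-        return sz < EX_MINSIZE ? EX_MINSIZE : sz;
-      }
-      static size_t ex_csize2tidx (size_t csize)
-      {
-        return (csize - EX_MINSIZE + EX_MALLOC_ALIGNMENT - 1) / EX_MALLOC_ALIGNMENT;
-      }
-      int main (void)
-      {
-        for (size_t req = 0; req <= 64; req += 8)
-          printf ("request %2zu -> chunk %2zu -> tcache idx %zu\n",
-                  req, ex_request2size (req), ex_csize2tidx (ex_request2size (req)));
-        return 0;
-      }
-    With these assumed constants, requests 0..24 land in idx 0 and 25..40
-    in idx 1, matching the table above.  */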
- /*
- REALLOC_ZERO_BYTES_FREES should be set if a call to
- realloc with zero bytes should be the same as a call to free.
- This is required by the C standard. Otherwise, since this malloc
- returns a unique pointer for malloc(0), so does realloc(p, 0).
- */
- #ifndef REALLOC_ZERO_BYTES_FREES
- #define REALLOC_ZERO_BYTES_FREES 1
- #endif
- /*
- TRIM_FASTBINS controls whether free() of a very small chunk can
- immediately lead to trimming. Setting to true (1) can reduce memory
- footprint, but will almost always slow down programs that use a lot
- of small chunks.
- Define this only if you are willing to give up some speed to more
- aggressively reduce system-level memory footprint when releasing
- memory in programs that use many small chunks. You can get
- essentially the same effect by setting MXFAST to 0, but this can
- lead to even greater slowdowns in programs using many small chunks.
- TRIM_FASTBINS is an in-between compile-time option, that disables
- only those chunks bordering topmost memory from being placed in
- fastbins.
- */
- #ifndef TRIM_FASTBINS
- #define TRIM_FASTBINS 0
- #endif
- /* Definition for getting more memory from the OS. */
- #define MORECORE (*__morecore)
- #define MORECORE_FAILURE 0
- void * __default_morecore (ptrdiff_t);
- void *(*__morecore)(ptrdiff_t) = __default_morecore;
- #include <string.h>
- /*
- MORECORE-related declarations. By default, rely on sbrk
- */
- /*
- MORECORE is the name of the routine to call to obtain more memory
- from the system. See below for general guidance on writing
- alternative MORECORE functions, as well as a version for WIN32 and a
- sample version for pre-OSX macos.
- */
- #ifndef MORECORE
- #define MORECORE sbrk
- #endif
- /*
- MORECORE_FAILURE is the value returned upon failure of MORECORE
- as well as mmap. Since it cannot be an otherwise valid memory address,
- and must reflect values of standard sys calls, you probably ought not
- try to redefine it.
- */
- #ifndef MORECORE_FAILURE
- #define MORECORE_FAILURE (-1)
- #endif
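- /* Illustrative sketch (not part of the original source): the general shape
-    of an alternative MORECORE.  It must behave like sbrk: a positive
-    increment grows the arena and returns the previous end, zero queries the
-    current end, and MORECORE_FAILURE (0 in this configuration) signals that
-    no memory is available.  The static-buffer arena below is hypothetical:
-      #include <stddef.h>
-      #define EXAMPLE_ARENA_SIZE (4 * 1024 * 1024)
-      static char example_arena[EXAMPLE_ARENA_SIZE];
-      static size_t example_brk;             // current "break" offset
-      void *example_morecore (ptrdiff_t increment)
-      {
-        void *old_top = example_arena + example_brk;
-        int ok = increment >= 0
-                 ? (size_t) increment <= EXAMPLE_ARENA_SIZE - example_brk
-                 : (size_t) -increment <= example_brk;
-        if (!ok)
-          return NULL;                       // MORECORE_FAILURE
-        example_brk = (size_t) ((ptrdiff_t) example_brk + increment);
-        return old_top;                      // sbrk convention: previous break
-      }
-    On glibc versions that still export the __morecore hook (it has since
-    been removed), a program could install it with
-    __morecore = example_morecore;  */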
- /*
- If MORECORE_CONTIGUOUS is true, take advantage of fact that
- consecutive calls to MORECORE with positive arguments always return
- contiguous increasing addresses. This is true of unix sbrk. Even
- if not defined, when regions happen to be contiguous, malloc will
- permit allocations spanning regions obtained from different
- calls. But defining this when applicable enables some stronger
- consistency checks and space efficiencies.
- */
- #ifndef MORECORE_CONTIGUOUS
- #define MORECORE_CONTIGUOUS 1
- #endif
- /*
- Define MORECORE_CANNOT_TRIM if your version of MORECORE
- cannot release space back to the system when given negative
- arguments. This is generally necessary only if you are using
- a hand-crafted MORECORE function that cannot handle negative arguments.
- */
- /* #define MORECORE_CANNOT_TRIM */
- /* MORECORE_CLEARS (default 1)
- The degree to which the routine mapped to MORECORE zeroes out
- memory: never (0), only for newly allocated space (1) or always
- (2). The distinction between (1) and (2) is necessary because on
- some systems, if the application first decrements and then
- increments the break value, the contents of the reallocated space
- are unspecified.
- */
- #ifndef MORECORE_CLEARS
- # define MORECORE_CLEARS 1
- #endif
- /*
- MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
- sbrk fails, and mmap is used as a backup. The value must be a
- multiple of page size. This backup strategy generally applies only
- when systems have "holes" in address space, so sbrk cannot perform
- contiguous expansion, but there is still space available on system.
- On systems for which this is known to be useful (i.e. most linux
- kernels), this occurs only when programs allocate huge amounts of
- memory. Between this, and the fact that mmap regions tend to be
- limited, the size should be large, to avoid too many mmap calls and
- thus avoid running out of kernel resources. */
- #ifndef MMAP_AS_MORECORE_SIZE
- #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
- #endif
- /*
- Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
- large blocks.
- */
- #ifndef HAVE_MREMAP
- #define HAVE_MREMAP 0
- #endif
- /* We may need to support __malloc_initialize_hook for backwards
- compatibility. */
- #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
- # define HAVE_MALLOC_INIT_HOOK 1
- #else
- # define HAVE_MALLOC_INIT_HOOK 0
- #endif
- /*
- This version of malloc supports the standard SVID/XPG mallinfo
- routine that returns a struct containing usage properties and
- statistics. It should work on any SVID/XPG compliant system that has
- a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
- install such a thing yourself, cut out the preliminary declarations
- as described above and below and save them in a malloc.h file. But
- there's no compelling reason to bother to do this.)
- The main declaration needed is the mallinfo struct that is returned
- (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
- bunch of fields that are not even meaningful in this version of
- malloc. These fields are instead filled by mallinfo() with
- other numbers that might be of interest.
- */
- /* ---------- description of public routines ------------ */
- /*
- malloc(size_t n)
- Returns a pointer to a newly allocated chunk of at least n bytes, or null
- if no space is available. Additionally, on failure, errno is
- set to ENOMEM on ANSI C systems.
- If n is zero, malloc returns a minimum-sized chunk. (The minimum
- size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
- systems.) On most systems, size_t is an unsigned type, so calls
- with negative arguments are interpreted as requests for huge amounts
- of space, which will often fail. The maximum supported value of n
- differs across systems, but is in all cases less than the maximum
- representable value of a size_t.
- */
- void* __libc_malloc(size_t);
- libc_hidden_proto (__libc_malloc)
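- /* Illustrative usage sketch (not part of the original source):
-      #include <errno.h>
-      #include <stdio.h>
-      #include <stdlib.h>
-      int main (void)
-      {
-        void *p = malloc ((size_t) 1 << 20);
-        if (p == NULL)
-          perror ("malloc");        // errno is set to ENOMEM on failure
-        else
-          free (p);
-        void *q = malloc (0);       // non-NULL: a minimum-sized chunk here
-        free (q);
-        return 0;
-      }
-    */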
- /*
- free(void* p)
- Releases the chunk of memory pointed to by p, that had been previously
- allocated using malloc or a related routine such as realloc.
- It has no effect if p is null. It can have arbitrary (i.e., bad!)
- effects if p has already been freed.
- Unless disabled (using mallopt), freeing very large spaces will
- when possible, automatically trigger operations that give
- back unused memory to the system, thus reducing program footprint.
- */
- void __libc_free(void*);
- libc_hidden_proto (__libc_free)
- /*
- calloc(size_t n_elements, size_t element_size);
- Returns a pointer to n_elements * element_size bytes, with all locations
- set to zero.
- */
- void* __libc_calloc(size_t, size_t);
- /*
- realloc(void* p, size_t n)
- Returns a pointer to a chunk of size n that contains the same data
- as does chunk p up to the minimum of (n, p's size) bytes, or null
- if no space is available.
- The returned pointer may or may not be the same as p. The algorithm
- prefers extending p when possible, otherwise it employs the
- equivalent of a malloc-copy-free sequence.
- If p is null, realloc is equivalent to malloc.
- If space is not available, realloc returns null, errno is set (if on
- ANSI) and p is NOT freed.
- if n is for fewer bytes than already held by p, the newly unused
- space is lopped off and freed if possible. Unless the #define
- REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
- zero (re)allocates a minimum-sized chunk.
- Large chunks that were internally obtained via mmap will always be
- grown using malloc-copy-free sequences unless the system supports
- MREMAP (currently only linux).
- The old unix realloc convention of allowing the last-free'd chunk
- to be used as an argument to realloc is not supported.
- */
- void* __libc_realloc(void*, size_t);
- libc_hidden_proto (__libc_realloc)
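- /* Illustrative usage sketch (not part of the original source): because a
-    failed realloc leaves the original block valid, the usual idiom assigns
-    to a temporary first:
-      #include <stdlib.h>
-      char *buf = malloc (100);
-      ...
-      char *bigger = realloc (buf, 1000);
-      if (bigger == NULL)
-        {
-          // allocation failed; buf is untouched and must still be freed
-        }
-      else
-        buf = bigger;
-    */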
- /*
- memalign(size_t alignment, size_t n);
- Returns a pointer to a newly allocated chunk of n bytes, aligned
- in accord with the alignment argument.
- The alignment argument should be a power of two. If the argument is
- not a power of two, the nearest greater power is used.
- 8-byte alignment is guaranteed by normal malloc calls, so don't
- bother calling memalign with an argument of 8 or less.
- Overreliance on memalign is a sure way to fragment space.
- */
- void* __libc_memalign(size_t, size_t);
- libc_hidden_proto (__libc_memalign)
- /*
- valloc(size_t n);
- Equivalent to memalign(pagesize, n), where pagesize is the page
- size of the system. If the pagesize is unknown, 4096 is used.
- */
- void* __libc_valloc(size_t);
- /*
- mallopt(int parameter_number, int parameter_value)
- Sets tunable parameters. The format is to provide a
- (parameter-number, parameter-value) pair. mallopt then sets the
- corresponding parameter to the argument value if it can (i.e., so
- long as the value is meaningful), and returns 1 if successful else
- 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
- normally defined in malloc.h. Only one of these (M_MXFAST) is used
- in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
- so setting them has no effect. But this malloc also supports four
- other options in mallopt. See below for details. Briefly, supported
- parameters are as follows (listed defaults are for "typical"
- configurations).
- Symbol param # default allowed param values
- M_MXFAST 1 64 0-80 (0 disables fastbins)
- M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
- M_TOP_PAD -2 0 any
- M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
- M_MMAP_MAX -4 65536 any (0 disables use of mmap)
- */
- int __libc_mallopt(int, int);
- libc_hidden_proto (__libc_mallopt)
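- /* Illustrative usage sketch (not part of the original source): the
-    parameters tabulated above are adjusted at run time with mallopt,
-    which returns 1 on success and 0 if the value is rejected:
-      #include <malloc.h>
-      mallopt (M_MXFAST, 0);                  // disable fastbins entirely
-      mallopt (M_MMAP_MAX, 0);                // never service requests via mmap
-      mallopt (M_TRIM_THRESHOLD, 256 * 1024); // trim top of heap less eagerly
-    */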
- /*
- mallinfo()
- Returns (by copy) a struct containing various summary statistics:
- arena: current total non-mmapped bytes allocated from system
- ordblks: the number of free chunks
- smblks: the number of fastbin blocks (i.e., small chunks that
- have been freed but not yet reused or consolidated)
- hblks: current number of mmapped regions
- hblkhd: total bytes held in mmapped regions
- usmblks: always 0
- fsmblks: total bytes held in fastbin blocks
- uordblks: current total allocated space (normal or mmapped)
- fordblks: total free space
- keepcost: the maximum number of bytes that could ideally be released
- back to system via malloc_trim. ("ideally" means that
- it ignores page restrictions etc.)
- Because these fields are ints, but internal bookkeeping may
- be kept as longs, the reported values may wrap around zero and
- thus be inaccurate.
- */
- struct mallinfo __libc_mallinfo(void);
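- /* Illustrative usage sketch (not part of the original source):
-      #include <malloc.h>
-      #include <stdio.h>
-      struct mallinfo mi = mallinfo ();
-      printf ("bytes from system (non-mmapped): %d\n", mi.arena);
-      printf ("free chunks:                     %d\n", mi.ordblks);
-      printf ("bytes in mmapped regions:        %d\n", mi.hblkhd);
-      printf ("total allocated space:           %d\n", mi.uordblks);
-      printf ("total free space:                %d\n", mi.fordblks);
-    As noted above, the fields are plain ints and can wrap in programs
-    whose bookkeeping exceeds INT_MAX bytes.  */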
- /*
- pvalloc(size_t n);
- Equivalent to valloc(minimum-page-that-holds(n)), that is,
- round up n to nearest pagesize.
- */
- void* __libc_pvalloc(size_t);
- /*
- malloc_trim(size_t pad);
- If possible, gives memory back to the system (via negative
- arguments to sbrk) if there is unused memory at the `high' end of
- the malloc pool. You can call this after freeing large blocks of
- memory to potentially reduce the system-level memory requirements
- of a program. However, it cannot guarantee to reduce memory. Under
- some allocation patterns, some large free blocks of memory will be
- locked between two used chunks, so they cannot be given back to
- the system.
- The `pad' argument to malloc_trim represents the amount of free
- trailing space to leave untrimmed. If this argument is zero,
- only the minimum amount of memory to maintain internal data
- structures will be left (one page or less). Non-zero arguments
- can be supplied to maintain enough trailing space to service
- future expected allocations without having to re-obtain memory
- from the system.
- Malloc_trim returns 1 if it actually released any memory, else 0.
- On systems that do not support "negative sbrks", it will always
- return 0.
- */
- int __malloc_trim(size_t);
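- /* Illustrative usage sketch (not part of the original source): after a
-    large working set has been freed, a long-lived process can ask for the
-    unused tail of the heap back, keeping e.g. 64 KiB of slack for future
-    requests:
-      #include <malloc.h>
-      int released = malloc_trim (64 * 1024);  // 1 if memory was returned
-    */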
- /*
- malloc_usable_size(void* p);
- Returns the number of bytes you can actually use in
- an allocated chunk, which may be more than you requested (although
- often not) due to alignment and minimum size constraints.
- You can use this many bytes without worrying about
- overwriting other allocated objects. This is not a particularly great
- programming practice. malloc_usable_size can be more useful in
- debugging and assertions, for example:
- p = malloc(n);
- assert(malloc_usable_size(p) >= 256);
- */
- size_t __malloc_usable_size(void*);
- /*
- malloc_stats();
- Prints on stderr the amount of space obtained from the system (both
- via sbrk and mmap), the maximum amount (which may be more than
- current if malloc_trim and/or munmap got called), and the current
- number of bytes allocated via malloc (or realloc, etc) but not yet
- freed. Note that this is the number of bytes allocated, not the
- number requested. It will be larger than the number requested
- because of alignment and bookkeeping overhead. Because it includes
- alignment wastage as being in use, this figure may be greater than
- zero even when no user-level chunks are allocated.
- The reported current and maximum system memory can be inaccurate if
- a program makes other calls to system memory allocation functions
- (normally sbrk) outside of malloc.
- malloc_stats prints only the most commonly interesting statistics.
- More information can be obtained by calling mallinfo.
- */
- void __malloc_stats(void);
- /*
- posix_memalign(void **memptr, size_t alignment, size_t size);
- POSIX wrapper like memalign(), checking for validity of size.
- */
- int __posix_memalign(void **, size_t, size_t);
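- /* Illustrative usage sketch (not part of the original source): unlike
-    memalign, posix_memalign reports errors through its return value and
-    requires the alignment to be a power-of-two multiple of sizeof (void *):
-      #include <stdlib.h>
-      void *p = NULL;
-      int err = posix_memalign (&p, 64, 1024);  // 1 KiB, 64-byte aligned
-      if (err != 0)
-        {
-          // EINVAL for a bad alignment, ENOMEM when out of memory;
-          // p is left unmodified on failure.
-        }
-      else
-        free (p);
-    */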
- /* mallopt tuning options */
- /*
- M_MXFAST is the maximum request size used for "fastbins", special bins
- that hold returned chunks without consolidating their spaces. This
- enables future requests for chunks of the same size to be handled
- very quickly, but can increase fragmentation, and thus increase the
- overall memory footprint of a program.
- This malloc manages fastbins very conservatively yet still
- efficiently, so fragmentation is rarely a problem for values less
- than or equal to the default. The maximum supported value of MXFAST
- is 80. You wouldn't want it any higher than this anyway. Fastbins
- are designed especially for use with many small structs, objects or
- strings -- the default handles structs/objects/arrays with sizes up
- to 8 4byte fields, or small strings representing words, tokens,
- etc. Using fastbins for larger objects normally worsens
- fragmentation without improving speed.
- M_MXFAST is set in REQUEST size units. It is internally used in
- chunksize units, which adds padding and alignment. You can reduce
- M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
- algorithm to be a closer approximation of fifo-best-fit in all cases,
- not just for larger requests, but will generally cause it to be
- slower.
- */
- /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
- #ifndef M_MXFAST
- #define M_MXFAST 1
- #endif
- #ifndef DEFAULT_MXFAST
- #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
- #endif
- /*
- M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
- to keep before releasing via malloc_trim in free().
- Automatic trimming is mainly useful in long-lived programs.
- Because trimming via sbrk can be slow on some systems, and can
- sometimes be wasteful (in cases where programs immediately
- afterward allocate more large chunks) the value should be high
- enough so that your overall system performance would improve by
- releasing this much memory.
- The trim threshold and the mmap control parameters (see below)
- can be traded off with one another. Trimming and mmapping are
- two different ways of releasing unused memory back to the
- system. Between these two, it is often possible to keep
- system-level demands of a long-lived program down to a bare
- minimum. For example, in one test suite of sessions measuring
- the XF86 X server on Linux, using a trim threshold of 128K and a
- mmap threshold of 192K led to near-minimal long term resource
- consumption.
- If you are using this malloc in a long-lived program, it should
- pay to experiment with these values. As a rough guide, you
- might set to a value close to the average size of a process
- (program) running on your system. Releasing this much memory
- would allow such a process to run in memory. Generally, it's
- worth it to tune for trimming rather than memory mapping when a
- program undergoes phases where several large chunks are
- allocated and released in ways that can reuse each other's
- storage, perhaps mixed with phases where there are no such
- chunks at all. And in well-behaved long-lived programs,
- controlling release of large blocks via trimming versus mapping
- is usually faster.
- However, in most programs, these parameters serve mainly as
- protection against the system-level effects of carrying around
- massive amounts of unneeded memory. Since frequent calls to
- sbrk, mmap, and munmap otherwise degrade performance, the default
- parameters are set to relatively high values that serve only as
- safeguards.
- The trim value must be greater than page size to have any useful
- effect. To disable trimming completely, you can set to
- (unsigned long)(-1)
- Trim settings interact with fastbin (MXFAST) settings: Unless
- TRIM_FASTBINS is defined, automatic trimming never takes place upon
- freeing a chunk with size less than or equal to MXFAST. Trimming is
- instead delayed until subsequent freeing of larger chunks. However,
- you can still force an attempted trim by calling malloc_trim.
- Also, trimming is not generally possible in cases where
- the main arena is obtained via mmap.
- Note that the trick some people use of mallocing a huge space and
- then freeing it at program startup, in an attempt to reserve system
- memory, doesn't have the intended effect under automatic trimming,
- since that memory will immediately be returned to the system.
- */
- #define M_TRIM_THRESHOLD -1
- #ifndef DEFAULT_TRIM_THRESHOLD
- #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
- #endif
- /*
- M_TOP_PAD is the amount of extra `padding' space to allocate or
- retain whenever sbrk is called. It is used in two ways internally:
- * When sbrk is called to extend the top of the arena to satisfy
- a new malloc request, this much padding is added to the sbrk
- request.
- * When malloc_trim is called automatically from free(),
- it is used as the `pad' argument.
- In both cases, the actual amount of padding is rounded
- so that the end of the arena is always a system page boundary.
- The main reason for using padding is to avoid calling sbrk so
- often. Having even a small pad greatly reduces the likelihood
- that nearly every malloc request during program start-up (or
- after trimming) will invoke sbrk, which needlessly wastes
- time.
- Automatic rounding-up to page-size units is normally sufficient
- to avoid measurable overhead, so the default is 0. However, in
- systems where sbrk is relatively slow, it can pay to increase
- this value, at the expense of carrying around more memory than
- the program needs.
- */
- #define M_TOP_PAD -2
- #ifndef DEFAULT_TOP_PAD
- #define DEFAULT_TOP_PAD (0)
- #endif
- /*
- MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
- adjusted MMAP_THRESHOLD.
- */
- #ifndef DEFAULT_MMAP_THRESHOLD_MIN
- #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
- #endif
- #ifndef DEFAULT_MMAP_THRESHOLD_MAX
- /* For 32-bit platforms we cannot increase the maximum mmap
- threshold much because it is also the minimum value for the
- maximum heap size and its alignment. Going above 512k (i.e., 1M
- for new heaps) wastes too much address space. */
- # if __WORDSIZE == 32
- # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
- # else
- # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
- # endif
- #endif
- /*
- M_MMAP_THRESHOLD is the request size threshold for using mmap()
- to service a request. Requests of at least this size that cannot
- be allocated using already-existing space will be serviced via mmap.
- (If enough normal freed space already exists it is used instead.)
- Using mmap segregates relatively large chunks of memory so that
- they can be individually obtained and released from the host
- system. A request serviced through mmap is never reused by any
- other request (at least not directly; the system may just so
- happen to remap successive requests to the same locations).
- Segregating space in this way has the benefits that:
- 1. Mmapped space can ALWAYS be individually released back
- to the system, which helps keep the system level memory
- demands of a long-lived program low.
- 2. Mapped memory can never become `locked' between
- other chunks, as can happen with normally allocated chunks, which
- means that even trimming via malloc_trim would not release them.
- 3. On some systems with "holes" in address spaces, mmap can obtain
- memory that sbrk cannot.
- However, it has the disadvantages that:
- 1. The space cannot be reclaimed, consolidated, and then
- used to service later requests, as happens with normal chunks.
- 2. It can lead to more wastage because of mmap page alignment
- requirements
- 3. It causes malloc performance to be more dependent on host
- system memory management support routines which may vary in
- implementation quality and may impose arbitrary
- limitations. Generally, servicing a request via normal
- malloc steps is faster than going through a system's mmap.
- The advantages of mmap nearly always outweigh disadvantages for
- "large" chunks, but the value of "large" varies across systems. The
- default is an empirically derived value that works well in most
- systems.
- Update in 2006:
- The above was written in 2001. Since then the world has changed a lot.
- Memory got bigger. Applications got bigger. The virtual address space
- layout in 32 bit linux changed.
- In the new situation, brk() and mmap space is shared and there are no
- artificial limits on brk size imposed by the kernel. What is more,
- applications have started using transient allocations larger than the
- 128Kb that was imagined in 2001.
- The price for mmap is also high now; each time glibc mmaps from the
- kernel, the kernel is forced to zero out the memory it gives to the
- application. Zeroing memory is expensive and eats a lot of cache and
- memory bandwidth. This has nothing to do with the efficiency of the
- virtual memory system; when handing out mmapped memory the kernel
- simply has no choice but to zero it.
- In 2001, the kernel had a maximum size for brk() which was about 800
- megabytes on 32 bit x86, at that point brk() would hit the first
- mmapped shared libraries and couldn't expand anymore. With current 2.6
- kernels, the VA space layout is different and brk() and mmap
- both can span the entire heap at will.
- Rather than using a static threshold for the brk/mmap tradeoff,
- we are now using a simple dynamic one. The goal is still to avoid
- fragmentation. The old goals we kept are
- 1) try to get the long lived large allocations to use mmap()
- 2) really large allocations should always use mmap()
- and we're adding now:
- 3) transient allocations should use brk() to avoid forcing the kernel
- having to zero memory over and over again
- The implementation works with a sliding threshold, which is by default
- limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
- out at 128Kb as per the 2001 default.
- This allows us to satisfy requirement 1) under the assumption that long
- lived allocations are made early in the process' lifespan, before it has
- started doing dynamic allocations of the same size (which will
- increase the threshold).
- The upperbound on the threshold satisfies requirement 2)
- The threshold goes up in value when the application frees memory that was
- allocated with the mmap allocator. The idea is that once the application
- starts freeing memory of a certain size, it's highly probable that this is
- a size the application uses for transient allocations. This estimator
- is there to satisfy the new third requirement.
- */
- #define M_MMAP_THRESHOLD -3
- #ifndef DEFAULT_MMAP_THRESHOLD
- #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
- #endif
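- /*
-    A minimal sketch of the sliding-threshold behaviour described above.
-    It assumes DEFAULT_MMAP_THRESHOLD_MAX is defined earlier in this file
-    alongside DEFAULT_MMAP_THRESHOLD_MIN, and uses fields of the
-    malloc_par instance mp_ declared further below.  The real update is
-    performed in the free path and may differ in detail; the function
-    name here is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_update_mmap_threshold (size_t freed_size)
- {
-   /* When an mmapped chunk of this size is freed, raise the sliding
-      threshold towards it (bounded above), so that later transient
-      allocations of the same size are served via brk() instead of
-      fresh, zero-filled mappings.  */
-   if (!mp_.no_dyn_threshold
-       && freed_size > mp_.mmap_threshold
-       && freed_size <= DEFAULT_MMAP_THRESHOLD_MAX)
-     mp_.mmap_threshold = freed_size;
- }
- #endif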
- /*
- M_MMAP_MAX is the maximum number of requests to simultaneously
- service using mmap. This parameter exists because
- some systems have a limited number of internal tables for
- use by mmap, and using more than a few of them may degrade
- performance.
- The default is set to a value that serves only as a safeguard.
- Setting to 0 disables use of mmap for servicing large requests.
- */
- #define M_MMAP_MAX -4
- #ifndef DEFAULT_MMAP_MAX
- #define DEFAULT_MMAP_MAX (65536)
- #endif
- #include <malloc.h>
- #ifndef RETURN_ADDRESS
- #define RETURN_ADDRESS(X_) (NULL)
- #endif
- /* Forward declarations. */
- struct malloc_chunk;
- typedef struct malloc_chunk* mchunkptr;
- /* Internal routines. */
- static void* _int_malloc(mstate, size_t);
- static void _int_free(mstate, mchunkptr, int);
- static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
- INTERNAL_SIZE_T);
- static void* _int_memalign(mstate, size_t, size_t);
- static void* _mid_memalign(size_t, size_t, void *);
- static void malloc_printerr(const char *str) __attribute__ ((noreturn));
- static void* mem2mem_check(void *p, size_t sz);
- static void top_check(void);
- static void munmap_chunk(mchunkptr p);
- #if HAVE_MREMAP
- static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
- #endif
- static void* malloc_check(size_t sz, const void *caller);
- static void free_check(void* mem, const void *caller);
- static void* realloc_check(void* oldmem, size_t bytes,
- const void *caller);
- static void* memalign_check(size_t alignment, size_t bytes,
- const void *caller);
- /* ------------------ MMAP support ------------------ */
- #include <fcntl.h>
- #include <sys/mman.h>
- #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
- # define MAP_ANONYMOUS MAP_ANON
- #endif
- #ifndef MAP_NORESERVE
- # define MAP_NORESERVE 0
- #endif
- #define MMAP(addr, size, prot, flags) \
- __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
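- /*
-    A standalone sketch of what the MMAP wrapper above asks the kernel
-    for: an anonymous, private mapping is page aligned and zero-filled,
-    which is exactly the per-request zeroing cost discussed in the 2006
-    note above.  The function name is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_anonymous_mapping (size_t size)
- {
-   unsigned char *mem = mmap (NULL, size, PROT_READ | PROT_WRITE,
-                              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-   if (mem != MAP_FAILED)
-     {
-       /* Every byte of a fresh anonymous mapping reads as zero.  */
-       for (size_t i = 0; i < size; i++)
-         assert (mem[i] == 0);
-       munmap (mem, size);
-     }
- }
- #endif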
- /*
- ----------------------- Chunk representations -----------------------
- */
- /*
- This struct declaration is misleading (but accurate and necessary).
- It declares a "view" into memory allowing access to necessary
- fields at known offsets from a given base. See explanation below.
- */
- struct malloc_chunk {
- INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
- INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
- struct malloc_chunk* fd; /* double links -- used only if free. */
- struct malloc_chunk* bk;
- /* Only used for large blocks: pointer to next larger size. */
- struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
- struct malloc_chunk* bk_nextsize;
- };
- /*
- malloc_chunk details:
- (The following includes lightly edited explanations by Colin Plumb.)
- Chunks of memory are maintained using a `boundary tag' method as
- described in e.g., Knuth or Standish. (See the paper by Paul
- Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
- survey of such techniques.) Sizes of free chunks are stored both
- in the front of each chunk and at the end. This makes
- consolidating fragmented chunks into bigger chunks very fast. The
- size fields also hold bits representing whether chunks are free or
- in use.
- An allocated chunk looks like this:
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if unallocated (P clear) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |A|M|P|
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | User data starts here... .
- . .
- . (malloc_usable_size() bytes) .
- . |
- nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | (size of chunk, but used for application data) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of next chunk, in bytes |A|0|1|
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- Where "chunk" is the front of the chunk for the purpose of most of
- the malloc code, but "mem" is the pointer that is returned to the
- user. "Nextchunk" is the beginning of the next contiguous chunk.
- Chunks always begin on even word boundaries, so the mem portion
- (which is returned to the user) is also on an even word boundary, and
- thus at least double-word aligned.
- Free chunks are stored in circular doubly-linked lists, and look like this:
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if unallocated (P clear) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `head:' | Size of chunk, in bytes |A|0|P|
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Forward pointer to next chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Back pointer to previous chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Unused space (may be 0 bytes long) .
- . .
- . |
- nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `foot:' | Size of chunk, in bytes |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of next chunk, in bytes |A|0|0|
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- The P (PREV_INUSE) bit, stored in the unused low-order bit of the
- chunk size (which is always a multiple of two words), is an in-use
- bit for the *previous* chunk. If that bit is *clear*, then the
- word before the current chunk size contains the previous chunk
- size, and can be used to find the front of the previous chunk.
- The very first chunk allocated always has this bit set,
- preventing access to non-existent (or non-owned) memory. If
- prev_inuse is set for any given chunk, then you CANNOT determine
- the size of the previous chunk, and might even get a memory
- addressing fault when trying to do so.
- The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
- main arena, described by the main_arena variable. When additional
- threads are spawned, each thread receives its own arena (up to a
- configurable limit, after which arenas are reused for multiple
- threads), and the chunks in these arenas have the A bit set. To
- find the arena for a chunk on such a non-main arena, heap_for_ptr
- performs a bit mask operation and indirection through the ar_ptr
- member of the per-heap header heap_info (see arena.c).
- Note that the `foot' of the current chunk is actually represented
- as the prev_size of the NEXT chunk. This makes it easier to
- deal with alignments etc but can be very confusing when trying
- to extend or adapt this code.
- The three exceptions to all this are:
- 1. The special chunk `top' doesn't bother using the
- trailing size field since there is no next contiguous chunk
- that would have to index off it. After initialization, `top'
- is forced to always exist. If it would become less than
- MINSIZE bytes long, it is replenished.
- 2. Chunks allocated via mmap, which have the second-lowest-order
- bit M (IS_MMAPPED) set in their size fields. Because they are
- allocated one-by-one, each must contain its own trailing size
- field. If the M bit is set, the other bits are ignored
- (because mmapped chunks are neither in an arena, nor adjacent
- to a freed chunk). The M bit is also used for chunks which
- originally came from a dumped heap via malloc_set_state in
- hooks.c.
- 3. Chunks in fastbins are treated as allocated chunks from the
- point of view of the chunk allocator. They are consolidated
- with their neighbors only in bulk, in malloc_consolidate.
- */
- /*
- ---------- Size and alignment checks and conversions ----------
- */
- /* conversion from malloc headers to user pointers, and back */
- #define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
- #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
- /* The smallest possible chunk */
- #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
- /* The smallest size we can malloc is an aligned minimal chunk */
- #define MINSIZE \
- (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
- /* Check if m has acceptable alignment */
- #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
- #define misaligned_chunk(p) \
- ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
- & MALLOC_ALIGN_MASK)
- /*
- Check if a request is so large that it would wrap around zero when
- padded and aligned. To simplify some other code, the bound is made
- low enough so that adding MINSIZE will also not wrap around zero.
- */
- #define REQUEST_OUT_OF_RANGE(req) \
- ((unsigned long) (req) >= \
- (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
- /* pad request bytes into a usable size -- internal version */
- #define request2size(req) \
- (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
- MINSIZE : \
- ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
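- /*
-    A worked example of the conversion above, assuming SIZE_SZ == 8 and
-    MALLOC_ALIGNMENT == 16 (a typical 64-bit configuration, giving
-    MINSIZE == 32).  The numbers follow directly from the macro; the
-    function name is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_request2size (void)
- {
-   /*  request2size (0)  == 32   tiny requests are padded up to MINSIZE
-       request2size (24) == 32   24 + SIZE_SZ still fits in a 32-byte chunk
-       request2size (25) == 48   25 + SIZE_SZ == 33, rounded up to 48  */
-   assert (request2size (0) == 32);
-   assert (request2size (24) == 32);
-   assert (request2size (25) == 48);
-   /* The user pointer lies 2*SIZE_SZ bytes past the chunk header;
-      chunk2mem and mem2chunk convert between the two views.  */
-   struct malloc_chunk dummy;
-   assert (mem2chunk (chunk2mem (&dummy)) == &dummy);
- }
- #endif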
- /* Same, except also perform an argument and result check. First, we check
- that the padding done by request2size didn't result in an integer
- overflow. Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
- size isn't so large that a later alignment would lead to another integer
- overflow. */
- #define checked_request2size(req, sz) \
- ({ \
- (sz) = request2size (req); \
- if (((sz) < (req)) \
- || REQUEST_OUT_OF_RANGE (sz)) \
- { \
- __set_errno (ENOMEM); \
- return 0; \
- } \
- })
- /*
- --------------- Physical chunk operations ---------------
- */
- /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
- #define PREV_INUSE 0x1
- /* extract inuse bit of previous chunk */
- #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
- /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
- #define IS_MMAPPED 0x2
- /* check for mmap()'ed chunk */
- #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
- /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
- from a non-main arena. This is only set immediately before handing
- the chunk to the user, if necessary. */
- #define NON_MAIN_ARENA 0x4
- /* Check for chunk from main arena. */
- #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
- /* Mark a chunk as not being on the main arena. */
- #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
- /*
- Bits to mask off when extracting size
- Note: IS_MMAPPED is intentionally not masked off from size field in
- macros for which mmapped chunks should never be seen. This should
- cause helpful core dumps to occur if it is tried by accident by
- people extending or adapting this malloc.
- */
- #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
- /* Get size, ignoring use bits */
- #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
- /* Like chunksize, but do not mask SIZE_BITS. */
- #define chunksize_nomask(p) ((p)->mchunk_size)
- /* Ptr to next physical malloc_chunk. */
- #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
- /* Size of the chunk below P. Only valid if !prev_inuse (P). */
- #define prev_size(p) ((p)->mchunk_prev_size)
- /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
- #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
- /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
- #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
- /* Treat space at ptr + offset as a chunk */
- #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
- /* extract p's inuse bit */
- #define inuse(p) \
- ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
- /* set/clear chunk as being inuse without otherwise disturbing */
- #define set_inuse(p) \
- ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
- #define clear_inuse(p) \
- ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
- /* check/set/clear inuse bits in known places */
- #define inuse_bit_at_offset(p, s) \
- (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
- #define set_inuse_bit_at_offset(p, s) \
- (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
- #define clear_inuse_bit_at_offset(p, s) \
- (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
- /* Set size at head, without disturbing its use bit */
- #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
- /* Set size/use field */
- #define set_head(p, s) ((p)->mchunk_size = (s))
- /* Set size at footer (only when chunk is not in use) */
- #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
- #pragma GCC poison mchunk_size
- #pragma GCC poison mchunk_prev_size
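- /*
-    A minimal sketch of how the boundary-tag macros above are typically
-    combined: walking physically adjacent chunks and testing the inuse
-    bit of a neighbour.  It assumes an initialized arena whose top chunk
-    is passed in; the function name is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_walk_chunks (mchunkptr p, mchunkptr top)
- {
-   while (p != top)
-     {
-       size_t sz = chunksize (p);   /* size with the flag bits masked off */
-       int used = inuse (p);        /* PREV_INUSE bit of the *next* chunk */
-       if (!used && !prev_inuse (p))
-         {
-           /* Never reached for normal chunks: consolidation keeps two
-              adjacent free chunks from coexisting (fastbin chunks keep
-              their inuse bit set, so they do not count as free here).  */
-         }
-       (void) sz;
-       p = next_chunk (p);
-     }
- }
- #endif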
- /*
- -------------------- Internal data structures --------------------
- All internal state is held in an instance of malloc_state defined
- below. There are no other static variables, except in two optional
- cases:
- * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
- * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
- for mmap.
- Beware of lots of tricks that minimize the total bookkeeping space
- requirements. The result is a little over 1K bytes (for 4-byte
- pointers and size_t).
- */
- /*
- Bins
- An array of bin headers for free chunks. Each bin is doubly
- linked. The bins are approximately proportionally (log) spaced.
- There are a lot of these bins (128). This may look excessive, but
- works very well in practice. Most bins hold sizes that are
- unusual as malloc request sizes, but are more usual for fragments
- and consolidated sets of chunks, which is what these bins hold, so
- they can be found quickly. All procedures maintain the invariant
- that no consolidated chunk physically borders another one, so each
- chunk in a list is known to be preceded and followed by either
- inuse chunks or the ends of memory.
- Chunks in bins are kept in size order, with ties going to the
- approximately least recently used chunk. Ordering isn't needed
- for the small bins, which all contain the same-sized chunks, but
- facilitates best-fit allocation for larger chunks. These lists
- are just sequential. Keeping them in order almost never requires
- enough traversal to warrant using fancier ordered data
- structures.
- Chunks of the same size are linked with the most
- recently freed at the front, and allocations are taken from the
- back. This results in LRU (FIFO) allocation order, which tends
- to give each chunk an equal opportunity to be consolidated with
- adjacent freed chunks, resulting in larger free chunks and less
- fragmentation.
- To simplify use in double-linked lists, each bin header acts
- as a malloc_chunk. This avoids special-casing for headers.
- But to conserve space and improve locality, we allocate
- only the fd/bk pointers of bins, and then use repositioning tricks
- to treat these as the fields of a malloc_chunk*.
- */
- typedef struct malloc_chunk *mbinptr;
- /* addressing -- note that bin_at(0) does not exist */
- #define bin_at(m, i) \
- (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
- - offsetof (struct malloc_chunk, fd))
- /* analog of ++bin */
- #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
- /* Reminders about list directionality within bins */
- #define first(b) ((b)->fd)
- #define last(b) ((b)->bk)
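- /*
-    A sketch of the repositioning trick mentioned above: bin_at backs up
-    from the two bins[] words by offsetof (struct malloc_chunk, fd), so
-    the returned header's fd/bk fields overlay exactly the storage
-    reserved for bin i, and an empty bin is one that links to itself.
-    The function name is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_bin_header (mstate m, int i)
- {
-   mbinptr b = bin_at (m, i);
-   assert (&b->fd == &m->bins[(i - 1) * 2]);
-   assert (&b->bk == &m->bins[(i - 1) * 2 + 1]);
-   if (first (b) == b)
-     {
-       /* bin i is empty: its header links to itself in both directions */
-       assert (last (b) == b);
-     }
- }
- #endif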
- /*
- Indexing
- Bins for sizes < 512 bytes contain chunks of all the same size, spaced
- 8 bytes apart. Larger bins are approximately logarithmically spaced:
- 64 bins of size 8
- 32 bins of size 64
- 16 bins of size 512
- 8 bins of size 4096
- 4 bins of size 32768
- 2 bins of size 262144
- 1 bin of size what's left
- There is actually a little bit of slop in the numbers in bin_index
- for the sake of speed. This makes no difference elsewhere.
- The bins top out around 1MB because we expect to service large
- requests via mmap.
- Bin 0 does not exist. Bin 1 is the unordered list; if that would be
- a valid chunk size the small bins are bumped up one.
- */
- #define NBINS 128
- #define NSMALLBINS 64
- #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
- #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
- #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
- #define in_smallbin_range(sz) \
- ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
- #define smallbin_index(sz) \
- ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
- + SMALLBIN_CORRECTION)
- #define largebin_index_32(sz) \
- (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
- ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
- ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
- ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
- ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
- 126)
- #define largebin_index_32_big(sz) \
- (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
- ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
- ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
- ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
- ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
- 126)
- // XXX It remains to be seen whether it is good to keep the widths of
- // XXX the buckets the same or whether it should be scaled by a factor
- // XXX of two as well.
- #define largebin_index_64(sz) \
- (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
- ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
- ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
- ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
- ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
- 126)
- #define largebin_index(sz) \
- (SIZE_SZ == 8 ? largebin_index_64 (sz) \
- : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
- : largebin_index_32 (sz))
- #define bin_index(sz) \
- ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
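- /*
-    Worked examples of the indexing above, assuming SIZE_SZ == 8 and
-    MALLOC_ALIGNMENT == 16 (so small bins are spaced 16 bytes apart,
-    MIN_LARGE_SIZE is 1024, and largebin_index_64 is selected).  The
-    function name is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_bin_index (void)
- {
-   /*  bin_index (32)      ==   2   smallest small bin
-       bin_index (1008)    ==  63   last small bin
-       bin_index (1024)    ==  64   first large bin (64-byte spacing)
-       bin_index (4096)    ==  99   512-byte spacing
-       bin_index (65536)   == 121   32768-byte spacing
-       bin_index (1 << 20) == 126   the catch-all last bin  */
-   assert (bin_index (32) == 2);
-   assert (bin_index (1008) == 63);
-   assert (bin_index (1024) == 64);
-   assert (bin_index (4096) == 99);
-   assert (bin_index (65536) == 121);
-   assert (bin_index (1 << 20) == 126);
- }
- #endif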
- /* Take a chunk off a bin list. */
- static void
- unlink_chunk (mstate av, mchunkptr p)
- {
- if (chunksize (p) != prev_size (next_chunk (p)))
- malloc_printerr ("corrupted size vs. prev_size");
- mchunkptr fd = p->fd;
- mchunkptr bk = p->bk;
- if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
- malloc_printerr ("corrupted double-linked list");
- fd->bk = bk;
- bk->fd = fd;
- if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
- {
- if (p->fd_nextsize->bk_nextsize != p
- || p->bk_nextsize->fd_nextsize != p)
- malloc_printerr ("corrupted double-linked list (not small)");
- if (fd->fd_nextsize == NULL)
- {
- if (p->fd_nextsize == p)
- fd->fd_nextsize = fd->bk_nextsize = fd;
- else
- {
- fd->fd_nextsize = p->fd_nextsize;
- fd->bk_nextsize = p->bk_nextsize;
- p->fd_nextsize->bk_nextsize = fd;
- p->bk_nextsize->fd_nextsize = fd;
- }
- }
- else
- {
- p->fd_nextsize->bk_nextsize = p->bk_nextsize;
- p->bk_nextsize->fd_nextsize = p->fd_nextsize;
- }
- }
- }
- /*
- Unsorted chunks
- All remainders from chunk splits, as well as all returned chunks,
- are first placed in the "unsorted" bin. They are then placed
- in regular bins after malloc gives them ONE chance to be used before
- binning. So, basically, the unsorted_chunks list acts as a queue,
- with chunks being placed on it in free (and malloc_consolidate),
- and taken off (to be either used or placed in bins) in malloc.
- The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
- does not have to be taken into account in size comparisons.
- */
- /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
- #define unsorted_chunks(M) (bin_at (M, 1))
- /*
- Top
- The top-most available chunk (i.e., the one bordering the end of
- available memory) is treated specially. It is never included in
- any bin, is used only if no other chunk is available, and is
- released back to the system if it is very large (see
- M_TRIM_THRESHOLD). Because top initially
- points to its own bin with initial zero size, thus forcing
- extension on the first malloc request, we avoid having any special
- code in malloc to check whether it even exists yet. But we still
- need to do so when getting memory from system, so we make
- initial_top treat the bin as a legal but unusable chunk during the
- interval between initialization and the first call to
- sysmalloc. (This is somewhat delicate, since it relies on
- the 2 preceding words to be zero during this interval as well.)
- */
- /* Conveniently, the unsorted bin can be used as dummy top on first call */
- #define initial_top(M) (unsorted_chunks (M))
- /*
- Binmap
- To help compensate for the large number of bins, a one-level index
- structure is used for bin-by-bin searching. `binmap' is a
- bitvector recording whether bins are definitely empty so they can
- be skipped over during traversals. The bits are NOT always
- cleared as soon as bins are empty, but instead only
- when they are noticed to be empty during traversal in malloc.
- */
- /* Conservatively use 32 bits per map word, even on a 64-bit system */
- #define BINMAPSHIFT 5
- #define BITSPERMAP (1U << BINMAPSHIFT)
- #define BINMAPSIZE (NBINS / BITSPERMAP)
- #define idx2block(i) ((i) >> BINMAPSHIFT)
- #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
- #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
- #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
- #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
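- /*
-    A minimal sketch of how the binmap is consulted: skip bins whose bit
-    is clear (definitely empty) and stop at the first candidate.  A set
-    bit only means "possibly non-empty" -- the caller still inspects the
-    bin and clears the bit if it turns out to be empty.  The real search
-    in _int_malloc is more involved; the function name is illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static int
- sketch_next_nonempty_bin (mstate m, int i)
- {
-   for (; i < NBINS; ++i)
-     if (get_binmap (m, i))
-       return i;                 /* candidate bin; may still be empty */
-   return NBINS;                 /* no bin at or above i is marked */
- }
- #endif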
- /*
- Fastbins
- An array of lists holding recently freed small chunks. Fastbins
- are not doubly linked. It is faster to single-link them, and
- since chunks are never removed from the middles of these lists,
- double linking is not necessary. Also, unlike regular bins, they
- are not even processed in FIFO order (they use faster LIFO) since
- ordering doesn't much matter in the transient contexts in which
- fastbins are normally used.
- Chunks in fastbins keep their inuse bit set, so they cannot
- be consolidated with other free chunks. malloc_consolidate
- releases all chunks in fastbins and consolidates them with
- other free chunks.
- */
- typedef struct malloc_chunk *mfastbinptr;
- #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
- /* offset 2 to use otherwise unindexable first 2 bins */
- #define fastbin_index(sz) \
- ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
- /* The maximum fastbin request size we support */
- #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
- #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
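- /*
-    Worked examples of the fastbin sizing above, assuming SIZE_SZ == 8:
-    chunk sizes are multiples of 16 starting at 32, each fastbin holds a
-    single chunk size, and with MAX_FAST_SIZE == 160 the largest index
-    is 9, so NFASTBINS == 10.  The function name is purely illustrative.
- */
- #if 0 /* illustrative sketch only -- not part of the allocator */
- static void
- sketch_fastbin_index (void)
- {
-   assert (fastbin_index (32) == 0);
-   assert (fastbin_index (48) == 1);
-   /* request2size (160) == 176, and (176 >> 4) - 2 == 9.  */
-   assert (fastbin_index (request2size (MAX_FAST_SIZE)) == 9);
-   assert (NFASTBINS == 10);
- }
- #endif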
- /*
- FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
- that triggers automatic consolidation of possibly-surrounding
- fastbin chunks. This is a heuristic, so the exact value should not
- matter too much. It is defined at half the default trim threshold as a
- compromise heuristic to only attempt consolidation if it is likely
- to lead to trimming. However, it is not dynamically tunable, since
- consolidation reduces fragmentation surrounding large chunks even
- if trimming is not used.
- */
- #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
- /*
- NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
- regions. Otherwise, contiguity is exploited in merging together,
- when possible, results from consecutive MORECORE calls.
- The initial value comes from MORECORE_CONTIGUOUS, but is
- changed dynamically if mmap is ever used as an sbrk substitute.
- */
- #define NONCONTIGUOUS_BIT (2U)
- #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
- #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
- #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
- #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
- /* Maximum size of memory handled in fastbins. */
- static INTERNAL_SIZE_T global_max_fast;
- /*
- Set value of max_fast.
- Use impossibly small value if 0.
- Precondition: there are no existing fastbin chunks in the main arena.
- Since do_check_malloc_state () checks this, we call malloc_consolidate ()
- before changing max_fast. Note other arenas will leak their fast bin
- entries if max_fast is reduced.
- */
- #define set_max_fast(s) \
- global_max_fast = (((s) == 0) \
- ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
- static inline INTERNAL_SIZE_T
- get_max_fast (void)
- {
- /* Tell the GCC optimizers that global_max_fast is never larger
- than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
- _int_malloc after constant propagation of the size parameter.
- (The code never executes because malloc preserves the
- global_max_fast invariant, but the optimizers may not recognize
- this.) */
- if (global_max_fast > MAX_FAST_SIZE)
- __builtin_unreachable ();
- return global_max_fast;
- }
- /*
- ----------- Internal state representation and initialization -----------
- */
- /*
- have_fastchunks indicates that there are probably some fastbin chunks.
- It is set true on entering a chunk into any fastbin, and cleared early in
- malloc_consolidate. The value is approximate since it may be set when there
- are no fastbin chunks, or it may be clear even if there are fastbin chunks
- available. Given its sole purpose is to reduce the number of redundant calls to
- malloc_consolidate, it does not affect correctness. As a result we can safely
- use relaxed atomic accesses.
- */
- struct malloc_state
- {
- /* Serialize access. */
- __libc_lock_define (, mutex);
- /* Flags (formerly in max_fast). */
- int flags;
- /* Set if the fastbin chunks contain recently inserted free blocks. */
- /* Note this is a bool but not all targets support atomics on booleans. */
- int have_fastchunks;
- /* Fastbins */
- mfastbinptr fastbinsY[NFASTBINS];
- /* Base of the topmost chunk -- not otherwise kept in a bin */
- mchunkptr top;
- /* The remainder from the most recent split of a small request */
- mchunkptr last_remainder;
- /* Normal bins packed as described above */
- mchunkptr bins[NBINS * 2 - 2];
- /* Bitmap of bins */
- unsigned int binmap[BINMAPSIZE];
- /* Linked list */
- struct malloc_state *next;
- /* Linked list for free arenas. Access to this field is serialized
- by free_list_lock in arena.c. */
- struct malloc_state *next_free;
- /* Number of threads attached to this arena. 0 if the arena is on
- the free list. Access to this field is serialized by
- free_list_lock in arena.c. */
- INTERNAL_SIZE_T attached_threads;
- /* Memory allocated from the system in this arena. */
- INTERNAL_SIZE_T system_mem;
- INTERNAL_SIZE_T max_system_mem;
- };
- struct malloc_par
- {
- /* Tunable parameters */
- unsigned long trim_threshold;
- INTERNAL_SIZE_T top_pad;
- INTERNAL_SIZE_T mmap_threshold;
- INTERNAL_SIZE_T arena_test;
- INTERNAL_SIZE_T arena_max;
- /* Memory map support */
- int n_mmaps;
- int n_mmaps_max;
- int max_n_mmaps;
- /* the mmap_threshold is dynamic, until the user sets
- it manually, at which point we need to disable any
- dynamic behavior. */
- int no_dyn_threshold;
- /* Statistics */
- INTERNAL_SIZE_T mmapped_mem;
- INTERNAL_SIZE_T max_mmapped_mem;
- /* First address handed out by MORECORE/sbrk. */
- char *sbrk_base;
- #if USE_TCACHE
- /* Maximum number of buckets to use. */
- size_t tcache_bins;
- size_t tcache_max_bytes;
- /* Maximum number of chunks in each bucket. */
- size_t tcache_count;
- /* Maximum number of chunks to remove from the unsorted list, which
- aren't used to prefill the cache. */
- size_t tcache_unsorted_limit;
- #endif
- };
- /* There are several instances of this struct ("arenas") in this
- malloc. If you are adapting this malloc in a way that does NOT use
- a static or mmapped malloc_state, you MUST explicitly zero-fill it
- before using. This malloc relies on the property that malloc_state
- is initialized to all zeroes (as is true of C statics). */
- static struct malloc_state main_arena =
- {
- .mutex = _LIBC_LOCK_INITIALIZER,
- .next = &main_arena,
- .attached_threads = 1
- };
- /* These variables are used for undumping support. Chunks are marked
- as using mmap, but we leave them alone if they fall into this
- range. NB: The chunk size for these chunks only includes the
- initial size field (of SIZE_SZ bytes), there is no trailing size
- field (unlike with regular mmapped chunks). */
- static mchunkptr dumped_main_arena_start; /* Inclusive. */
- static mchunkptr dumped_main_arena_end; /* Exclusive. */
- /* True if the pointer falls into the dumped arena. Use this after
- chunk_is_mmapped indicates a chunk is mmapped. */
- #define DUMPED_MAIN_ARENA_CHUNK(p) \
- ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
- /* There is only one instance of the malloc parameters. */
- static struct malloc_par mp_ =
- {
- .top_pad = DEFAULT_TOP_PAD,
- .n_mmaps_max = DEFAULT_MMAP_MAX,
- .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
- .trim_threshold = DEFAULT_TRIM_THRESHOLD,
- #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
- .arena_test = NARENAS_FROM_NCORES (1)
- #if USE_TCACHE
- ,
- .tcache_count = TCACHE_FILL_COUNT,
- .tcache_bins = TCACHE_MAX_BINS,
- .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
- .tcache_unsorted_limit = 0 /* No limit. */
- #endif
- };
- /*
- Initialize a malloc_state struct.
- This is called from ptmalloc_init () or from _int_new_arena ()
- when creating a new arena.
- */
- static void
- malloc_init_state (mstate av)
- {
- int i;
- mbinptr bin;
- /* Establish circular links for normal bins */
- for (i = 1; i < NBINS; ++i)
- {
- bin = bin_at (av, i);
- bin->fd = bin->bk = bin;
- }
- #if MORECORE_CONTIGUOUS
- if (av != &main_arena)
- #endif
- set_noncontiguous (av);
- if (av == &main_arena)
- set_max_fast (DEFAULT_MXFAST);
- atomic_store_relaxed (&av->have_fastchunks, false);
- av->top = initial_top (av);
- }
- /*
- Other internal utilities operating on mstates
- */
- static void *sysmalloc (INTERNAL_SIZE_T, mstate);
- static int systrim (size_t, mstate);
- static void malloc_consolidate (mstate);
- /* -------------- Early definitions for debugging hooks ---------------- */
- /* Define and initialize the hook variables. These weak definitions must
- appear before any use of the variables in a function (arena.c uses one). */
- #ifndef weak_variable
- /* In GNU libc we want the hook variables to be weak definitions to
- avoid a problem with Emacs. */
- # define weak_variable weak_function
- #endif
- /* Forward declarations. */
- static void *malloc_hook_ini (size_t sz,
- const void *caller) __THROW;
- static void *realloc_hook_ini (void *ptr, size_t sz,
- const void *caller) __THROW;
- static void *memalign_hook_ini (size_t alignment, size_t sz,
- const void *caller) __THROW;
- #if HAVE_MALLOC_INIT_HOOK
- void weak_variable (*__malloc_initialize_hook) (void) = NULL;
- compat_symbol (libc, __malloc_initialize_hook,
- __malloc_initialize_hook, GLIBC_2_0);
- #endif
- void weak_variable (*__free_hook) (void *__ptr,
- const void *) = NULL;
- void *weak_variable (*__malloc_hook)
- (size_t __size, const void *) = malloc_hook_ini;
- void *weak_variable (*__realloc_hook)
- (void *__ptr, size_t __size, const void *)
- = realloc_hook_ini;
- void *weak_variable (*__memalign_hook)
- (size_t __alignment, size_t __size, const void *)
- = memalign_hook_ini;
- void weak_variable (*__after_morecore_hook) (void) = NULL;
- /* This function is called from the arena shutdown hook, to free the
- thread cache (if it exists). */
- static void tcache_thread_shutdown (void);
- /* ------------------ Testing support ----------------------------------*/
- static int perturb_byte;
- static void
- alloc_perturb (char *p, size_t n)
- {
- if (__glibc_unlikely (perturb_byte))
- memset (p, perturb_byte ^ 0xff, n);
- }
- static void
- free_perturb (char *p, size_t n)
- {
- if (__glibc_unlikely (perturb_byte))
- memset (p, perturb_byte, n);
- }
- #include <stap-probe.h>
- /* ------------------- Support for multiple arenas -------------------- */
- #include "arena.c"
- /*
- Debugging support
- These routines make a number of assertions about the states
- of data structures that should be true at all times. If any
- are not true, it's very likely that a user program has somehow
- trashed memory. (It's also possible that there is a coding error
- in malloc. In which case, please report it!)
- */
- #if !MALLOC_DEBUG
- # define check_chunk(A, P)
- # define check_free_chunk(A, P)
- # define check_inuse_chunk(A, P)
- # define check_remalloced_chunk(A, P, N)
- # define check_malloced_chunk(A, P, N)
- # define check_malloc_state(A)
- #else
- # define check_chunk(A, P) do_check_chunk (A, P)
- # define check_free_chunk(A, P) do_check_free_chunk (A, P)
- # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
- # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
- # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
- # define check_malloc_state(A) do_check_malloc_state (A)
- /*
- Properties of all chunks
- */
- static void
- do_check_chunk (mstate av, mchunkptr p)
- {
- unsigned long sz = chunksize (p);
- /* min and max possible addresses assuming contiguous allocation */
- char *max_address = (char *) (av->top) + chunksize (av->top);
- char *min_address = max_address - av->system_mem;
- if (!chunk_is_mmapped (p))
- {
- /* Has legal address ... */
- if (p != av->top)
- {
- if (contiguous (av))
- {
- assert (((char *) p) >= min_address);
- assert (((char *) p + sz) <= ((char *) (av->top)));
- }
- }
- else
- {
- /* top size is always at least MINSIZE */
- assert ((unsigned long) (sz) >= MINSIZE);
- /* top predecessor always marked inuse */
- assert (prev_inuse (p));
- }
- }
- else if (!DUMPED_MAIN_ARENA_CHUNK (p))
- {
- /* address is outside main heap */
- if (contiguous (av) && av->top != initial_top (av))
- {
- assert (((char *) p) < min_address || ((char *) p) >= max_address);
- }
- /* chunk is page-aligned */
- assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
- /* mem is aligned */
- assert (aligned_OK (chunk2mem (p)));
- }
- }
- /*
- Properties of free chunks
- */
- static void
- do_check_free_chunk (mstate av, mchunkptr p)
- {
- INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
- mchunkptr next = chunk_at_offset (p, sz);
- do_check_chunk (av, p);
- /* Chunk must claim to be free ... */
- assert (!inuse (p));
- assert (!chunk_is_mmapped (p));
- /* Unless a special marker, must have OK fields */
- if ((unsigned long) (sz) >= MINSIZE)
- {
- assert ((sz & MALLOC_ALIGN_MASK) == 0);
- assert (aligned_OK (chunk2mem (p)));
- /* ... matching footer field */
- assert (prev_size (next_chunk (p)) == sz);
- /* ... and is fully consolidated */
- assert (prev_inuse (p));
- assert (next == av->top || inuse (next));
- /* ... and has minimally sane links */
- assert (p->fd->bk == p);
- assert (p->bk->fd == p);
- }
- else /* markers are always of size SIZE_SZ */
- assert (sz == SIZE_SZ);
- }
- /*
- Properties of inuse chunks
- */
- static void
- do_check_inuse_chunk (mstate av, mchunkptr p)
- {
- mchunkptr next;
- do_check_chunk (av, p);
- if (chunk_is_mmapped (p))
- return; /* mmapped chunks have no next/prev */
- /* Check whether it claims to be in use ... */
- assert (inuse (p));
- next = next_chunk (p);
- /* ... and is surrounded by OK chunks.
- Since more things can be checked with free chunks than inuse ones,
- if an inuse chunk borders them and debug is on, it's worth doing them.
- */
- if (!prev_inuse (p))
- {
- /* Note that we cannot even look at prev unless it is not inuse */
- mchunkptr prv = prev_chunk (p);
- assert (next_chunk (prv) == p);
- do_check_free_chunk (av, prv);
- }
- if (next == av->top)
- {
- assert (prev_inuse (next));
- assert (chunksize (next) >= MINSIZE);
- }
- else if (!inuse (next))
- do_check_free_chunk (av, next);
- }
- /*
- Properties of chunks recycled from fastbins
- */
- static void
- do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
- {
- INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
- if (!chunk_is_mmapped (p))
- {
- assert (av == arena_for_chunk (p));
- if (chunk_main_arena (p))
- assert (av == &main_arena);
- else
- assert (av != &main_arena);
- }
- do_check_inuse_chunk (av, p);
- /* Legal size ... */
- assert ((sz & MALLOC_ALIGN_MASK) == 0);
- assert ((unsigned long) (sz) >= MINSIZE);
- /* ... and alignment */
- assert (aligned_OK (chunk2mem (p)));
- /* chunk is less than MINSIZE more than request */
- assert ((long) (sz) - (long) (s) >= 0);
- assert ((long) (sz) - (long) (s + MINSIZE) < 0);
- }
- /*
- Properties of nonrecycled chunks at the point they are malloced
- */
- static void
- do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
- {
- /* same as recycled case ... */
- do_check_remalloced_chunk (av, p, s);
- /*
- ... plus, must obey implementation invariant that prev_inuse is
- always true of any allocated chunk; i.e., that each allocated
- chunk borders either a previously allocated and still in-use
- chunk, or the base of its memory arena. This is ensured
- by making all allocations from the `lowest' part of any found
- chunk. This does not necessarily hold however for chunks
- recycled via fastbins.
- */
- assert (prev_inuse (p));
- }
- /*
- Properties of malloc_state.
- This may be useful for debugging malloc, as well as detecting user
- programmer errors that somehow write into malloc_state.
- If you are extending or experimenting with this malloc, you can
- probably figure out how to hack this routine to print out or
- display chunk addresses, sizes, bins, and other instrumentation.
- */
- static void
- do_check_malloc_state (mstate av)
- {
- int i;
- mchunkptr p;
- mchunkptr q;
- mbinptr b;
- unsigned int idx;
- INTERNAL_SIZE_T size;
- unsigned long total = 0;
- int max_fast_bin;
- /* internal size_t must be no wider than pointer type */
- assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
- /* alignment is a power of 2 */
- assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
- /* Check the arena is initialized. */
- assert (av->top != 0);
- /* No memory has been allocated yet, so doing more tests is not possible. */
- if (av->top == initial_top (av))
- return;
- /* pagesize is a power of 2 */
- assert (powerof2(GLRO (dl_pagesize)));
- /* A contiguous main_arena is consistent with sbrk_base. */
- if (av == &main_arena && contiguous (av))
- assert ((char *) mp_.sbrk_base + av->system_mem ==
- (char *) av->top + chunksize (av->top));
- /* properties of fastbins */
- /* max_fast is in allowed range */
- assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
- max_fast_bin = fastbin_index (get_max_fast ());
- for (i = 0; i < NFASTBINS; ++i)
- {
- p = fastbin (av, i);
- /* The following test can only be performed for the main arena.
- While mallopt calls malloc_consolidate to get rid of all fast
- bins (especially those larger than the new maximum) this does
- only happen for the main arena. Trying to do this for any
- other arena would mean those arenas have to be locked and
- malloc_consolidate be called for them. This is excessive. And
- even if this is acceptable to somebody it still cannot solve
- the problem completely since if the arena is locked a
- concurrent malloc call might create a new arena which then
- could use the newly invalid fast bins. */
- /* all bins past max_fast are empty */
- if (av == &main_arena && i > max_fast_bin)
- assert (p == 0);
- while (p != 0)
- {
- /* each chunk claims to be inuse */
- do_check_inuse_chunk (av, p);
- total += chunksize (p);
- /* chunk belongs in this bin */
- assert (fastbin_index (chunksize (p)) == i);
- p = p->fd;
- }
- }
- /* check normal bins */
- for (i = 1; i < NBINS; ++i)
- {
- b = bin_at (av, i);
- /* binmap is accurate (except for bin 1 == unsorted_chunks) */
- if (i >= 2)
- {
- unsigned int binbit = get_binmap (av, i);
- int empty = last (b) == b;
- if (!binbit)
- assert (empty);
- else if (!empty)
- assert (binbit);
- }
- for (p = last (b); p != b; p = p->bk)
- {
- /* each chunk claims to be free */
- do_check_free_chunk (av, p);
- size = chunksize (p);
- total += size;
- if (i >= 2)
- {
- /* chunk belongs in bin */
- idx = bin_index (size);
- assert (idx == i);
- /* lists are sorted */
- assert (p->bk == b ||
- (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
- if (!in_smallbin_range (size))
- {
- if (p->fd_nextsize != NULL)
- {
- if (p->fd_nextsize == p)
- assert (p->bk_nextsize == p);
- else
- {
- if (p->fd_nextsize == first (b))
- assert (chunksize (p) < chunksize (p->fd_nextsize));
- else
- assert (chunksize (p) > chunksize (p->fd_nextsize));
- if (p == first (b))
- assert (chunksize (p) > chunksize (p->bk_nextsize));
- else
- assert (chunksize (p) < chunksize (p->bk_nextsize));
- }
- }
- else
- assert (p->bk_nextsize == NULL);
- }
- }
- else if (!in_smallbin_range (size))
- assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
- /* chunk is followed by a legal chain of inuse chunks */
- for (q = next_chunk (p);
- (q != av->top && inuse (q) &&
- (unsigned long) (chunksize (q)) >= MINSIZE);
- q = next_chunk (q))
- do_check_inuse_chunk (av, q);
- }
- }
- /* top chunk is OK */
- check_chunk (av, av->top);
- }
- #endif
- /* ----------------- Support for debugging hooks -------------------- */
- #include "hooks.c"
- /* ----------- Routines dealing with system allocation -------------- */
- /*
- sysmalloc handles malloc cases requiring more memory from the system.
- On entry, it is assumed that av->top does not have enough
- space to service request for nb bytes, thus requiring that av->top
- be extended or replaced.
- */
- static void *
- sysmalloc (INTERNAL_SIZE_T nb, mstate av)
- {
- mchunkptr old_top; /* incoming value of av->top */
- INTERNAL_SIZE_T old_size; /* its size */
- char *old_end; /* its end address */
- long size; /* arg to first MORECORE or mmap call */
- char *brk; /* return value from MORECORE */
- long correction; /* arg to 2nd MORECORE call */
- char *snd_brk; /* 2nd return val */
- INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
- INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
- char *aligned_brk; /* aligned offset into brk */
- mchunkptr p; /* the allocated/returned chunk */
- mchunkptr remainder; /* remainder from allocation */
- unsigned long remainder_size; /* its size */
- size_t pagesize = GLRO (dl_pagesize);
- bool tried_mmap = false;
- /*
- If have mmap, and the request size meets the mmap threshold, and
- the system supports mmap, and there are few enough currently
- allocated mmapped regions, try to directly map this request
- rather than expanding top.
- */
- if (av == NULL
- || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
- && (mp_.n_mmaps < mp_.n_mmaps_max)))
- {
- char *mm; /* return value from mmap call*/
- try_mmap:
- /*
- Round up size to nearest page. For mmapped chunks, the overhead
- is one SIZE_SZ unit larger than for normal chunks, because there
- is no following chunk whose prev_size field could be used.
- See the front_misalign handling below; for glibc there is no
- need for further alignments unless we have high alignment.
- */
- if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
- size = ALIGN_UP (nb + SIZE_SZ, pagesize);
- else
- size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
- tried_mmap = true;
- /* Don't try if size wraps around 0 */
- if ((unsigned long) (size) > (unsigned long) (nb))
- {
- mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
- if (mm != MAP_FAILED)
- {
- /*
- The offset to the start of the mmapped region is stored
- in the prev_size field of the chunk. This allows us to adjust
- returned start address to meet alignment requirements here
- and in memalign(), and still be able to compute proper
- address argument for later munmap in free() and realloc().
- */
- if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
- {
- /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
- MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
- aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
- front_misalign = 0;
- }
- else
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0)
- {
- correction = MALLOC_ALIGNMENT - front_misalign;
- p = (mchunkptr) (mm + correction);
- set_prev_size (p, correction);
- set_head (p, (size - correction) | IS_MMAPPED);
- }
- else
- {
- p = (mchunkptr) mm;
- set_prev_size (p, 0);
- set_head (p, size | IS_MMAPPED);
- }
- /* update statistics */
- int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
- atomic_max (&mp_.max_n_mmaps, new);
- unsigned long sum;
- sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
- atomic_max (&mp_.max_mmapped_mem, sum);
- check_chunk (av, p);
- return chunk2mem (p);
- }
- }
- }
- /* There are no usable arenas and mmap also failed. */
- if (av == NULL)
- return 0;
- /* Record incoming configuration of top */
- old_top = av->top;
- old_size = chunksize (old_top);
- old_end = (char *) (chunk_at_offset (old_top, old_size));
- brk = snd_brk = (char *) (MORECORE_FAILURE);
- /*
- If not the first time through, we require old_size to be
- at least MINSIZE and to have prev_inuse set.
- */
- assert ((old_top == initial_top (av) && old_size == 0) ||
- ((unsigned long) (old_size) >= MINSIZE &&
- prev_inuse (old_top) &&
- ((unsigned long) old_end & (pagesize - 1)) == 0));
- /* Precondition: not enough current space to satisfy nb request */
- assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
- if (av != &main_arena)
- {
- heap_info *old_heap, *heap;
- size_t old_heap_size;
- /* First try to extend the current heap. */
- old_heap = heap_for_ptr (old_top);
- old_heap_size = old_heap->size;
- if ((long) (MINSIZE + nb - old_size) > 0
- && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
- {
- av->system_mem += old_heap->size - old_heap_size;
- set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
- | PREV_INUSE);
- }
- else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
- {
- /* Use a newly allocated heap. */
- heap->ar_ptr = av;
- heap->prev = old_heap;
- av->system_mem += heap->size;
- /* Set up the new top. */
- top (av) = chunk_at_offset (heap, sizeof (*heap));
- set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
- /* Setup fencepost and free the old top chunk with a multiple of
- MALLOC_ALIGNMENT in size. */
- /* The fencepost takes at least MINSIZE bytes, because it might
- become the top chunk again later. Note that a footer is set
- up, too, although the chunk is marked in use. */
- old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
- set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
- if (old_size >= MINSIZE)
- {
- set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
- set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
- set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
- _int_free (av, old_top, 1);
- }
- else
- {
- set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
- set_foot (old_top, (old_size + 2 * SIZE_SZ));
- }
- }
- else if (!tried_mmap)
- /* We can at least try to use mmap to get memory. */
- goto try_mmap;
- }
- else /* av == main_arena */
- { /* Request enough space for nb + pad + overhead */
- size = nb + mp_.top_pad + MINSIZE;
- /*
- If contiguous, we can subtract out existing space that we hope to
- combine with new space. We add it back later only if
- we don't actually get contiguous space.
- */
- if (contiguous (av))
- size -= old_size;
- /*
- Round to a multiple of page size.
- If MORECORE is not contiguous, this ensures that we only call it
- with whole-page arguments. And if MORECORE is contiguous and
- this is not first time through, this preserves page-alignment of
- previous calls. Otherwise, we correct to page-align below.
- */
- size = ALIGN_UP (size, pagesize);
- /*
- Don't try to call MORECORE if argument is so big as to appear
- negative. Note that since mmap takes size_t arg, it may succeed
- below even if we cannot call MORECORE.
- */
- if (size > 0)
- {
- brk = (char *) (MORECORE (size));
- LIBC_PROBE (memory_sbrk_more, 2, brk, size);
- }
- if (brk != (char *) (MORECORE_FAILURE))
- {
- /* Call the `morecore' hook if necessary. */
- void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
- if (__builtin_expect (hook != NULL, 0))
- (*hook)();
- }
- else
- {
- /*
- If have mmap, try using it as a backup when MORECORE fails or
- cannot be used. This is worth doing on systems that have "holes" in
- address space, so sbrk cannot extend to give contiguous space, but
- space is available elsewhere. Note that we ignore mmap max count
- and threshold limits, since the space will not be used as a
- segregated mmap region.
- */
- /* Cannot merge with old top, so add its size back in */
- if (contiguous (av))
- size = ALIGN_UP (size + old_size, pagesize);
- /* If we are relying on mmap as backup, then use larger units */
- if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
- size = MMAP_AS_MORECORE_SIZE;
- /* Don't try if size wraps around 0 */
- if ((unsigned long) (size) > (unsigned long) (nb))
- {
- char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
- if (mbrk != MAP_FAILED)
- {
- /* We do not need, and cannot use, another sbrk call to find end */
- brk = mbrk;
- snd_brk = brk + size;
- /*
- Record that we no longer have a contiguous sbrk region.
- After the first time mmap is used as backup, we do not
- ever rely on contiguous space since this could incorrectly
- bridge regions.
- */
- set_noncontiguous (av);
- }
- }
- }
- if (brk != (char *) (MORECORE_FAILURE))
- {
- if (mp_.sbrk_base == 0)
- mp_.sbrk_base = brk;
- av->system_mem += size;
- /*
- If MORECORE extends previous space, we can likewise extend top size.
- */
- if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
- set_head (old_top, (size + old_size) | PREV_INUSE);
- else if (contiguous (av) && old_size && brk < old_end)
- /* Oops! Someone else killed our space. Can't touch anything. */
- malloc_printerr ("break adjusted to free malloc space");
- /*
- Otherwise, make adjustments:
- * If the first time through or noncontiguous, we need to call sbrk
- just to find out where the end of memory lies.
- * We need to ensure that all returned chunks from malloc will meet
- MALLOC_ALIGNMENT
- * If there was an intervening foreign sbrk, we need to adjust sbrk
- request size to account for fact that we will not be able to
- combine new space with existing space in old_top.
- * Almost all systems internally allocate whole pages at a time, in
- which case we might as well use the whole last page of request.
- So we allocate enough more memory to hit a page boundary now,
- which in turn causes future contiguous calls to page-align.
- */
- else
- {
- front_misalign = 0;
- end_misalign = 0;
- correction = 0;
- aligned_brk = brk;
- /* handle contiguous cases */
- if (contiguous (av))
- {
- /* Count foreign sbrk as system_mem. */
- if (old_size)
- av->system_mem += brk - old_end;
- /* Guarantee alignment of first new chunk made from this space */
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0)
- {
- /*
- Skip over some bytes to arrive at an aligned position.
- We don't need to specially mark these wasted front bytes.
- They will never be accessed anyway because
- prev_inuse of av->top (and any chunk created from its start)
- is always true after initialization.
- */
- correction = MALLOC_ALIGNMENT - front_misalign;
- aligned_brk += correction;
- }
- /*
- If this isn't adjacent to existing space, then we will not
- be able to merge with old_top space, so must add to 2nd request.
- */
- correction += old_size;
- /* Extend the end address to hit a page boundary */
- end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
- correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
- assert (correction >= 0);
- snd_brk = (char *) (MORECORE (correction));
- /*
- If the correction can't be allocated, try to at least find out the current
- brk. It might be enough to proceed without failing.
- Note that if second sbrk did NOT fail, we assume that space
- is contiguous with first sbrk. This is a safe assumption unless
- program is multithreaded but doesn't use locks and a foreign sbrk
- occurred between our first and second calls.
- */
- if (snd_brk == (char *) (MORECORE_FAILURE))
- {
- correction = 0;
- snd_brk = (char *) (MORECORE (0));
- }
- else
- {
- /* Call the `morecore' hook if necessary. */
- void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
- if (__builtin_expect (hook != NULL, 0))
- (*hook)();
- }
- }
- /* handle non-contiguous cases */
- else
- {
- if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
- /* MORECORE/mmap must correctly align */
- assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
- else
- {
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0)
- {
- /*
- Skip over some bytes to arrive at an aligned position.
- We don't need to specially mark these wasted front bytes.
- They will never be accessed anyway because
- prev_inuse of av->top (and any chunk created from its start)
- is always true after initialization.
- */
- aligned_brk += MALLOC_ALIGNMENT - front_misalign;
- }
- }
- /* Find out current end of memory */
- if (snd_brk == (char *) (MORECORE_FAILURE))
- {
- snd_brk = (char *) (MORECORE (0));
- }
- }
- /* Adjust top based on results of second sbrk */
- if (snd_brk != (char *) (MORECORE_FAILURE))
- {
- av->top = (mchunkptr) aligned_brk;
- set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
- av->system_mem += correction;
- /*
- If not the first time through, we either have a
- gap due to foreign sbrk or a non-contiguous region. Insert a
- double fencepost at old_top to prevent consolidation with space
- we don't own. These fenceposts are artificial chunks that are
- marked as inuse and are in any case too small to use. We need
- two to make sizes and alignments work out.
- */
- if (old_size != 0)
- {
- /*
- Shrink old_top to insert fenceposts, keeping size a
- multiple of MALLOC_ALIGNMENT. We know there is at least
- enough space in old_top to do this.
- */
- old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
- set_head (old_top, old_size | PREV_INUSE);
- /*
- Note that the following assignments completely overwrite
- old_top when old_size was previously MINSIZE. This is
- intentional. We need the fencepost, even if old_top otherwise gets
- lost.
- */
- set_head (chunk_at_offset (old_top, old_size),
- (2 * SIZE_SZ) | PREV_INUSE);
- set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
- (2 * SIZE_SZ) | PREV_INUSE);
- /* If possible, release the rest. */
- if (old_size >= MINSIZE)
- {
- _int_free (av, old_top, 1);
- }
- }
- }
- }
- }
- } /* if (av != &main_arena) */
- if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
- av->max_system_mem = av->system_mem;
- check_malloc_state (av);
- /* finally, do the allocation */
- p = av->top;
- size = chunksize (p);
- /* check that one of the above allocation paths succeeded */
- if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
- {
- remainder_size = size - nb;
- remainder = chunk_at_offset (p, nb);
- av->top = remainder;
- set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head (remainder, remainder_size | PREV_INUSE);
- check_malloced_chunk (av, p, nb);
- return chunk2mem (p);
- }
- /* catch all failure paths */
- __set_errno (ENOMEM);
- return 0;
- }
- /*
- systrim is an inverse of sorts to sysmalloc. It gives memory back
- to the system (via negative arguments to sbrk) if there is unused
- memory at the `high' end of the malloc pool. It is called
- automatically by free() when top space exceeds the trim
- threshold. It is also called by the public malloc_trim routine. It
- returns 1 if it actually released any memory, else 0.
- */
- static int
- systrim (size_t pad, mstate av)
- {
- long top_size; /* Amount of top-most memory */
- long extra; /* Amount to release */
- long released; /* Amount actually released */
- char *current_brk; /* address returned by pre-check sbrk call */
- char *new_brk; /* address returned by post-check sbrk call */
- size_t pagesize;
- long top_area;
- pagesize = GLRO (dl_pagesize);
- top_size = chunksize (av->top);
- top_area = top_size - MINSIZE - 1;
- if (top_area <= pad)
- return 0;
- /* Release in pagesize units and round down to the nearest page. */
- extra = ALIGN_DOWN(top_area - pad, pagesize);
- if (extra == 0)
- return 0;
- /*
- Only proceed if end of memory is where we last set it.
- This avoids problems if there were foreign sbrk calls.
- */
- current_brk = (char *) (MORECORE (0));
- if (current_brk == (char *) (av->top) + top_size)
- {
- /*
- Attempt to release memory. We ignore MORECORE return value,
- and instead call again to find out where new end of memory is.
- This avoids problems if the first call releases less than we asked,
- or if failure somehow altered the brk value. (We could still
- encounter problems if it altered brk in some very bad way,
- but the only thing we can do is adjust anyway, which will cause
- some downstream failure.)
- */
- MORECORE (-extra);
- /* Call the `morecore' hook if necessary. */
- void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
- if (__builtin_expect (hook != NULL, 0))
- (*hook)();
- new_brk = (char *) (MORECORE (0));
- LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
- if (new_brk != (char *) MORECORE_FAILURE)
- {
- released = (long) (current_brk - new_brk);
- if (released != 0)
- {
- /* Success. Adjust top. */
- av->system_mem -= released;
- set_head (av->top, (top_size - released) | PREV_INUSE);
- check_malloc_state (av);
- return 1;
- }
- }
- }
- return 0;
- }
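- /* Illustrative arithmetic for the trim computation above, assuming a
-    4096-byte page, pad == 0 and a 64-bit MINSIZE of 32: a top chunk of
-    200000 bytes gives top_area = 199967 and
-    extra = ALIGN_DOWN (199967, 4096) = 196608, i.e. 48 whole pages are
-    handed back to the kernel via MORECORE (-extra) while top keeps at
-    least MINSIZE bytes. */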
- static void
- munmap_chunk (mchunkptr p)
- {
- size_t pagesize = GLRO (dl_pagesize);
- INTERNAL_SIZE_T size = chunksize (p);
- assert (chunk_is_mmapped (p));
- /* Do nothing if the chunk is a faked mmapped chunk in the dumped
- main arena. We never free this memory. */
- if (DUMPED_MAIN_ARENA_CHUNK (p))
- return;
- uintptr_t mem = (uintptr_t) chunk2mem (p);
- uintptr_t block = (uintptr_t) p - prev_size (p);
- size_t total_size = prev_size (p) + size;
- /* Unfortunately we have to do the compiler's job by hand here. Normally
- we would test BLOCK and TOTAL-SIZE separately for compliance with the
- page size. But gcc does not recognize the optimization possibility
- (at the moment, at least), so we combine the two values into one before
- the bit test. */
- if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
- || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
- malloc_printerr ("munmap_chunk(): invalid pointer");
- atomic_decrement (&mp_.n_mmaps);
- atomic_add (&mp_.mmapped_mem, -total_size);
- /* If munmap failed the process virtual memory address space is in a
- bad shape. Just leave the block hanging around, the process will
- terminate shortly anyway since not much can be done. */
- __munmap ((char *) block, total_size);
- }
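- /* For mmapped chunks the prev_size field does not describe a previous
-    chunk; it records how many bytes of the mapping precede the chunk
-    header (front padding added for alignment).  munmap_chunk above and
-    mremap_chunk below rely on this to recover the original mapping:
-    block = (uintptr_t) p - prev_size (p) and
-    total_size = prev_size (p) + chunksize (p). */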
- #if HAVE_MREMAP
- static mchunkptr
- mremap_chunk (mchunkptr p, size_t new_size)
- {
- size_t pagesize = GLRO (dl_pagesize);
- INTERNAL_SIZE_T offset = prev_size (p);
- INTERNAL_SIZE_T size = chunksize (p);
- char *cp;
- assert (chunk_is_mmapped (p));
- uintptr_t block = (uintptr_t) p - offset;
- uintptr_t mem = (uintptr_t) chunk2mem(p);
- size_t total_size = offset + size;
- if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
- || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
- malloc_printerr("mremap_chunk(): invalid pointer");
- /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
- new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
- /* No need to remap if the number of pages does not change. */
- if (total_size == new_size)
- return p;
- cp = (char *) __mremap ((char *) block, total_size, new_size,
- MREMAP_MAYMOVE);
- if (cp == MAP_FAILED)
- return 0;
- p = (mchunkptr) (cp + offset);
- assert (aligned_OK (chunk2mem (p)));
- assert (prev_size (p) == offset);
- set_head (p, (new_size - offset) | IS_MMAPPED);
- INTERNAL_SIZE_T new;
- new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
- + new_size - size - offset;
- atomic_max (&mp_.max_mmapped_mem, new);
- return p;
- }
- #endif /* HAVE_MREMAP */
- /*------------------------ Public wrappers. --------------------------------*/
- #if USE_TCACHE
- /* We overlay this structure on the user-data portion of a chunk when
- the chunk is stored in the per-thread cache. */
- typedef struct tcache_entry
- {
- struct tcache_entry *next;
- /* This field exists to detect double frees. */
- struct tcache_perthread_struct *key;
- } tcache_entry;
- /* There is one of these for each thread, which contains the
- per-thread cache (hence "tcache_perthread_struct"). Keeping
- overall size low is mildly important. Note that COUNTS and ENTRIES
- are redundant (we could have just counted the linked list each
- time); keeping the counts is purely a performance optimization. */
- typedef struct tcache_perthread_struct
- {
- char counts[TCACHE_MAX_BINS];
- tcache_entry *entries[TCACHE_MAX_BINS];
- } tcache_perthread_struct;
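- /* Size sketch, assuming the default TCACHE_MAX_BINS of 64: on a 64-bit
-    target this struct is 64 one-byte counters plus 64 pointers, roughly
-    576 bytes, allocated lazily once per thread by tcache_init below. */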
- static __thread bool tcache_shutting_down = false;
- static __thread tcache_perthread_struct *tcache = NULL;
- /* Caller must ensure that we know tc_idx is valid and there's room
- for more chunks. */
- static __always_inline void
- tcache_put (mchunkptr chunk, size_t tc_idx)
- {
- tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
- assert (tc_idx < TCACHE_MAX_BINS);
- /* Mark this chunk as "in the tcache" so the test in _int_free will
- detect a double free. */
- e->key = tcache;
- e->next = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e;
- ++(tcache->counts[tc_idx]);
- }
- /* Caller must ensure that we know tc_idx is valid and that there are
- chunks available to remove. */
- static __always_inline void *
- tcache_get (size_t tc_idx)
- {
- tcache_entry *e = tcache->entries[tc_idx];
- assert (tc_idx < TCACHE_MAX_BINS);
- assert (tcache->entries[tc_idx] > 0);
- tcache->entries[tc_idx] = e->next;
- --(tcache->counts[tc_idx]);
- e->key = NULL;
- return (void *) e;
- }
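- /* tcache bins are simple singly linked LIFO lists: tcache_put pushes at
-    the head and tcache_get pops from the head, so a chunk freed by a
-    thread is the first candidate for that thread's next allocation in the
-    same size class.  Neither function takes a lock; the cache is strictly
-    per thread, and the key field doubles as the double-free marker tested
-    in _int_free. */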
- static void
- tcache_thread_shutdown (void)
- {
- int i;
- tcache_perthread_struct *tcache_tmp = tcache;
- if (!tcache)
- return;
- /* Disable the tcache and prevent it from being reinitialized. */
- tcache = NULL;
- tcache_shutting_down = true;
- /* Free all of the entries and the tcache itself back to the arena
- heap for coalescing. */
- for (i = 0; i < TCACHE_MAX_BINS; ++i)
- {
- while (tcache_tmp->entries[i])
- {
- tcache_entry *e = tcache_tmp->entries[i];
- tcache_tmp->entries[i] = e->next;
- __libc_free (e);
- }
- }
- __libc_free (tcache_tmp);
- }
- static void
- tcache_init(void)
- {
- mstate ar_ptr;
- void *victim = 0;
- const size_t bytes = sizeof (tcache_perthread_struct);
- if (tcache_shutting_down)
- return;
- arena_get (ar_ptr, bytes);
- victim = _int_malloc (ar_ptr, bytes);
- if (!victim && ar_ptr != NULL)
- {
- ar_ptr = arena_get_retry (ar_ptr, bytes);
- victim = _int_malloc (ar_ptr, bytes);
- }
- if (ar_ptr != NULL)
- __libc_lock_unlock (ar_ptr->mutex);
- /* In a low memory situation, we may not be able to allocate memory
- - in which case, we just keep trying later. However, we
- typically do this very early, so either there is sufficient
- memory, or there isn't enough memory to do non-trivial
- allocations anyway. */
- if (victim)
- {
- tcache = (tcache_perthread_struct *) victim;
- memset (tcache, 0, sizeof (tcache_perthread_struct));
- }
- }
- # define MAYBE_INIT_TCACHE() \
- if (__glibc_unlikely (tcache == NULL)) \
- tcache_init();
- #else /* !USE_TCACHE */
- # define MAYBE_INIT_TCACHE()
- static void
- tcache_thread_shutdown (void)
- {
- /* Nothing to do if there is no thread cache. */
- }
- #endif /* !USE_TCACHE */
- void *
- __libc_malloc (size_t bytes)
- {
- mstate ar_ptr;
- void *victim;
- void *(*hook) (size_t, const void *)
- = atomic_forced_read (__malloc_hook);
- if (__builtin_expect (hook != NULL, 0))
- return (*hook)(bytes, RETURN_ADDRESS (0));
- #if USE_TCACHE
- /* int_free also calls request2size, be careful to not pad twice. */
- size_t tbytes;
- checked_request2size (bytes, tbytes);
- size_t tc_idx = csize2tidx (tbytes);
- MAYBE_INIT_TCACHE ();
- DIAG_PUSH_NEEDS_COMMENT;
- if (tc_idx < mp_.tcache_bins
- /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
- && tcache
- && tcache->entries[tc_idx] != NULL)
- {
- return tcache_get (tc_idx);
- }
- DIAG_POP_NEEDS_COMMENT;
- #endif
- if (SINGLE_THREAD_P)
- {
- victim = _int_malloc (&main_arena, bytes);
- assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
- &main_arena == arena_for_chunk (mem2chunk (victim)));
- return victim;
- }
- arena_get (ar_ptr, bytes);
- victim = _int_malloc (ar_ptr, bytes);
- /* Retry with another arena only if we were able to find a usable arena
- before. */
- if (!victim && ar_ptr != NULL)
- {
- LIBC_PROBE (memory_malloc_retry, 1, bytes);
- ar_ptr = arena_get_retry (ar_ptr, bytes);
- victim = _int_malloc (ar_ptr, bytes);
- }
- if (ar_ptr != NULL)
- __libc_lock_unlock (ar_ptr->mutex);
- assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
- ar_ptr == arena_for_chunk (mem2chunk (victim)));
- return victim;
- }
- libc_hidden_def (__libc_malloc)
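- /* Fast-path summary for __libc_malloc: a hook, if installed, wins; then
-    a tcache hit returns without touching any arena or lock; otherwise the
-    request goes to _int_malloc on the main arena (single-threaded case)
-    or on an arena chosen by arena_get, with one retry on another arena
-    before giving up. */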
- void
- __libc_free (void *mem)
- {
- mstate ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
- void (*hook) (void *, const void *)
- = atomic_forced_read (__free_hook);
- if (__builtin_expect (hook != NULL, 0))
- {
- (*hook)(mem, RETURN_ADDRESS (0));
- return;
- }
- if (mem == 0) /* free(0) has no effect */
- return;
- p = mem2chunk (mem);
- if (chunk_is_mmapped (p)) /* release mmapped memory. */
- {
- /* See if the dynamic brk/mmap threshold needs adjusting.
- Dumped fake mmapped chunks do not affect the threshold. */
- if (!mp_.no_dyn_threshold
- && chunksize_nomask (p) > mp_.mmap_threshold
- && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
- && !DUMPED_MAIN_ARENA_CHUNK (p))
- {
- mp_.mmap_threshold = chunksize (p);
- mp_.trim_threshold = 2 * mp_.mmap_threshold;
- LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
- mp_.mmap_threshold, mp_.trim_threshold);
- }
- munmap_chunk (p);
- return;
- }
- MAYBE_INIT_TCACHE ();
- ar_ptr = arena_for_chunk (p);
- _int_free (ar_ptr, p, 0);
- }
- libc_hidden_def (__libc_free)
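- /* Dynamic threshold sketch: freeing an mmapped chunk of, say, 256 KiB
-    (above the current mmap_threshold but within
-    DEFAULT_MMAP_THRESHOLD_MAX) raises mp_.mmap_threshold to 256 KiB and
-    mp_.trim_threshold to 512 KiB, so later requests of that size are
-    served from the heap instead of paying an mmap/munmap round trip. */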
- void *
- __libc_realloc (void *oldmem, size_t bytes)
- {
- mstate ar_ptr;
- INTERNAL_SIZE_T nb; /* padded request size */
- void *newp; /* chunk to return */
- void *(*hook) (void *, size_t, const void *) =
- atomic_forced_read (__realloc_hook);
- if (__builtin_expect (hook != NULL, 0))
- return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
- #if REALLOC_ZERO_BYTES_FREES
- if (bytes == 0 && oldmem != NULL)
- {
- __libc_free (oldmem); return 0;
- }
- #endif
- /* realloc of null is supposed to be same as malloc */
- if (oldmem == 0)
- return __libc_malloc (bytes);
- /* chunk corresponding to oldmem */
- const mchunkptr oldp = mem2chunk (oldmem);
- /* its size */
- const INTERNAL_SIZE_T oldsize = chunksize (oldp);
- if (chunk_is_mmapped (oldp))
- ar_ptr = NULL;
- else
- {
- MAYBE_INIT_TCACHE ();
- ar_ptr = arena_for_chunk (oldp);
- }
- /* Little security check which won't hurt performance: the allocator
- never wraps around at the end of the address space. Therefore
- we can exclude some size values which might appear here by
- accident or by "design" from some intruder. We need to bypass
- this check for dumped fake mmap chunks from the old main arena
- because the new malloc may provide additional alignment. */
- if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
- || __builtin_expect (misaligned_chunk (oldp), 0))
- && !DUMPED_MAIN_ARENA_CHUNK (oldp))
- malloc_printerr ("realloc(): invalid pointer");
- checked_request2size (bytes, nb);
- if (chunk_is_mmapped (oldp))
- {
- /* If this is a faked mmapped chunk from the dumped main arena,
- always make a copy (and do not free the old chunk). */
- if (DUMPED_MAIN_ARENA_CHUNK (oldp))
- {
- /* Must alloc, copy, free. */
- void *newmem = __libc_malloc (bytes);
- if (newmem == 0)
- return NULL;
- /* Copy as many bytes as are available from the old chunk
- and fit into the new size. NB: The overhead for faked
- mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
- regular mmapped chunks. */
- if (bytes > oldsize - SIZE_SZ)
- bytes = oldsize - SIZE_SZ;
- memcpy (newmem, oldmem, bytes);
- return newmem;
- }
- void *newmem;
- #if HAVE_MREMAP
- newp = mremap_chunk (oldp, nb);
- if (newp)
- return chunk2mem (newp);
- #endif
- /* Note the extra SIZE_SZ overhead. */
- if (oldsize - SIZE_SZ >= nb)
- return oldmem; /* do nothing */
- /* Must alloc, copy, free. */
- newmem = __libc_malloc (bytes);
- if (newmem == 0)
- return 0; /* propagate failure */
- memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
- munmap_chunk (oldp);
- return newmem;
- }
- if (SINGLE_THREAD_P)
- {
- newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
- assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
- ar_ptr == arena_for_chunk (mem2chunk (newp)));
- return newp;
- }
- __libc_lock_lock (ar_ptr->mutex);
- newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
- __libc_lock_unlock (ar_ptr->mutex);
- assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
- ar_ptr == arena_for_chunk (mem2chunk (newp)));
- if (newp == NULL)
- {
- /* Try harder to allocate memory in other arenas. */
- LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
- newp = __libc_malloc (bytes);
- if (newp != NULL)
- {
- memcpy (newp, oldmem, oldsize - SIZE_SZ);
- _int_free (ar_ptr, oldp, 0);
- }
- }
- return newp;
- }
- libc_hidden_def (__libc_realloc)
- void *
- __libc_memalign (size_t alignment, size_t bytes)
- {
- void *address = RETURN_ADDRESS (0);
- return _mid_memalign (alignment, bytes, address);
- }
- static void *
- _mid_memalign (size_t alignment, size_t bytes, void *address)
- {
- mstate ar_ptr;
- void *p;
- void *(*hook) (size_t, size_t, const void *) =
- atomic_forced_read (__memalign_hook);
- if (__builtin_expect (hook != NULL, 0))
- return (*hook)(alignment, bytes, address);
- /* If we need less alignment than we give anyway, just relay to malloc. */
- if (alignment <= MALLOC_ALIGNMENT)
- return __libc_malloc (bytes);
- /* Otherwise, ensure that it is at least a minimum chunk size */
- if (alignment < MINSIZE)
- alignment = MINSIZE;
- /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
- power of 2 and will cause overflow in the check below. */
- if (alignment > SIZE_MAX / 2 + 1)
- {
- __set_errno (EINVAL);
- return 0;
- }
- /* Check for overflow. */
- if (bytes > SIZE_MAX - alignment - MINSIZE)
- {
- __set_errno (ENOMEM);
- return 0;
- }
- /* Make sure alignment is power of 2. */
- if (!powerof2 (alignment))
- {
- size_t a = MALLOC_ALIGNMENT * 2;
- while (a < alignment)
- a <<= 1;
- alignment = a;
- }
- if (SINGLE_THREAD_P)
- {
- p = _int_memalign (&main_arena, alignment, bytes);
- assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
- &main_arena == arena_for_chunk (mem2chunk (p)));
- return p;
- }
- arena_get (ar_ptr, bytes + alignment + MINSIZE);
- p = _int_memalign (ar_ptr, alignment, bytes);
- if (!p && ar_ptr != NULL)
- {
- LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
- ar_ptr = arena_get_retry (ar_ptr, bytes);
- p = _int_memalign (ar_ptr, alignment, bytes);
- }
- if (ar_ptr != NULL)
- __libc_lock_unlock (ar_ptr->mutex);
- assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
- ar_ptr == arena_for_chunk (mem2chunk (p)));
- return p;
- }
- /* For ISO C11. */
- weak_alias (__libc_memalign, aligned_alloc)
- libc_hidden_def (__libc_memalign)
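- /* Alignment normalization example, assuming MALLOC_ALIGNMENT == 16: a
-    request for alignment 48 is not a power of two, so the loop in
-    _mid_memalign above rounds it up to 64; an alignment of 8 or 16 is
-    simply relayed to __libc_malloc, which already guarantees
-    MALLOC_ALIGNMENT. */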
- void *
- __libc_valloc (size_t bytes)
- {
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- void *address = RETURN_ADDRESS (0);
- size_t pagesize = GLRO (dl_pagesize);
- return _mid_memalign (pagesize, bytes, address);
- }
- void *
- __libc_pvalloc (size_t bytes)
- {
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- void *address = RETURN_ADDRESS (0);
- size_t pagesize = GLRO (dl_pagesize);
- size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
- /* Check for overflow. */
- if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
- {
- __set_errno (ENOMEM);
- return 0;
- }
- return _mid_memalign (pagesize, rounded_bytes, address);
- }
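- /* valloc returns page-aligned memory of the requested size; pvalloc
-    additionally rounds the size itself up to a whole number of pages
-    (rounded_bytes above) before delegating to _mid_memalign. */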
- void *
- __libc_calloc (size_t n, size_t elem_size)
- {
- mstate av;
- mchunkptr oldtop, p;
- INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
- void *mem;
- unsigned long clearsize;
- unsigned long nclears;
- INTERNAL_SIZE_T *d;
- /* size_t is unsigned so the behavior on overflow is defined. */
- bytes = n * elem_size;
- #define HALF_INTERNAL_SIZE_T \
- (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
- if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
- {
- if (elem_size != 0 && bytes / elem_size != n)
- {
- __set_errno (ENOMEM);
- return 0;
- }
- }
- void *(*hook) (size_t, const void *) =
- atomic_forced_read (__malloc_hook);
- if (__builtin_expect (hook != NULL, 0))
- {
- sz = bytes;
- mem = (*hook)(sz, RETURN_ADDRESS (0));
- if (mem == 0)
- return 0;
- return memset (mem, 0, sz);
- }
- sz = bytes;
- MAYBE_INIT_TCACHE ();
- if (SINGLE_THREAD_P)
- av = &main_arena;
- else
- arena_get (av, sz);
- if (av)
- {
- /* Check if we hand out the top chunk, in which case there may be no
- need to clear. */
- #if MORECORE_CLEARS
- oldtop = top (av);
- oldtopsize = chunksize (top (av));
- # if MORECORE_CLEARS < 2
- /* Only newly allocated memory is guaranteed to be cleared. */
- if (av == &main_arena &&
- oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
- oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
- # endif
- if (av != &main_arena)
- {
- heap_info *heap = heap_for_ptr (oldtop);
- if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
- oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
- }
- #endif
- }
- else
- {
- /* No usable arenas. */
- oldtop = 0;
- oldtopsize = 0;
- }
- mem = _int_malloc (av, sz);
- assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
- av == arena_for_chunk (mem2chunk (mem)));
- if (!SINGLE_THREAD_P)
- {
- if (mem == 0 && av != NULL)
- {
- LIBC_PROBE (memory_calloc_retry, 1, sz);
- av = arena_get_retry (av, sz);
- mem = _int_malloc (av, sz);
- }
- if (av != NULL)
- __libc_lock_unlock (av->mutex);
- }
- /* Allocation failed even after a retry. */
- if (mem == 0)
- return 0;
- p = mem2chunk (mem);
- /* Two optional cases in which clearing is not necessary */
- if (chunk_is_mmapped (p))
- {
- if (__builtin_expect (perturb_byte, 0))
- return memset (mem, 0, sz);
- return mem;
- }
- csz = chunksize (p);
- #if MORECORE_CLEARS
- if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
- {
- /* clear only the bytes from non-freshly-sbrked memory */
- csz = oldtopsize;
- }
- #endif
- /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
- contents have an odd number of INTERNAL_SIZE_T-sized words;
- minimally 3. */
- d = (INTERNAL_SIZE_T *) mem;
- clearsize = csz - SIZE_SZ;
- nclears = clearsize / sizeof (INTERNAL_SIZE_T);
- assert (nclears >= 3);
- if (nclears > 9)
- return memset (d, 0, clearsize);
- else
- {
- *(d + 0) = 0;
- *(d + 1) = 0;
- *(d + 2) = 0;
- if (nclears > 4)
- {
- *(d + 3) = 0;
- *(d + 4) = 0;
- if (nclears > 6)
- {
- *(d + 5) = 0;
- *(d + 6) = 0;
- if (nclears > 8)
- {
- *(d + 7) = 0;
- *(d + 8) = 0;
- }
- }
- }
- }
- return mem;
- }
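- /* Clearing example, assuming 64-bit SIZE_SZ == 8: the smallest chunk
-    (csz == 32) gives clearsize == 24 and nclears == 3, the minimum the
-    assert above requires; anything needing more than 9 words falls back
-    to memset.  The mmapped case skips clearing entirely, and the
-    fresh-sbrk case clears only the bytes that predate the sbrk, because
-    newly mapped memory is already zero-filled by the kernel. */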
- /*
- ------------------------------ malloc ------------------------------
- */
- static void *
- _int_malloc (mstate av, size_t bytes)
- {
- INTERNAL_SIZE_T nb; /* normalized request size */
- unsigned int idx; /* associated bin index */
- mbinptr bin; /* associated bin */
- mchunkptr victim; /* inspected/selected chunk */
- INTERNAL_SIZE_T size; /* its size */
- int victim_index; /* its bin index */
- mchunkptr remainder; /* remainder from a split */
- unsigned long remainder_size; /* its size */
- unsigned int block; /* bit map traverser */
- unsigned int bit; /* bit map traverser */
- unsigned int map; /* current word of binmap */
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
- #if USE_TCACHE
- size_t tcache_unsorted_count; /* count of unsorted chunks processed */
- #endif
- /*
- Convert request size to internal form by adding SIZE_SZ bytes
- overhead plus possibly more to obtain necessary alignment and/or
- to obtain a size of at least MINSIZE, the smallest allocatable
- size. Also, checked_request2size traps (returning 0) request sizes
- that are so large that they wrap around zero when padded and
- aligned.
- */
- checked_request2size (bytes, nb);
- /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
- mmap. */
- if (__glibc_unlikely (av == NULL))
- {
- void *p = sysmalloc (nb, av);
- if (p != NULL)
- alloc_perturb (p, bytes);
- return p;
- }
- /*
- If the size qualifies as a fastbin, first check corresponding bin.
- This code is safe to execute even if av is not yet initialized, so we
- can try it without checking, which saves some time on this fast path.
- */
- #define REMOVE_FB(fb, victim, pp) \
- do \
- { \
- victim = pp; \
- if (victim == NULL) \
- break; \
- } \
- while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
- != victim); \
- if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
- {
- idx = fastbin_index (nb);
- mfastbinptr *fb = &fastbin (av, idx);
- mchunkptr pp;
- victim = *fb;
- if (victim != NULL)
- {
- if (SINGLE_THREAD_P)
- *fb = victim->fd;
- else
- REMOVE_FB (fb, pp, victim);
- if (__glibc_likely (victim != NULL))
- {
- size_t victim_idx = fastbin_index (chunksize (victim));
- if (__builtin_expect (victim_idx != idx, 0))
- malloc_printerr ("malloc(): memory corruption (fast)");
- check_remalloced_chunk (av, victim, nb);
- #if USE_TCACHE
- /* While we're here, if we see other chunks of the same size,
- stash them in the tcache. */
- size_t tc_idx = csize2tidx (nb);
- if (tcache && tc_idx < mp_.tcache_bins)
- {
- mchunkptr tc_victim;
- /* While bin not empty and tcache not full, copy chunks. */
- while (tcache->counts[tc_idx] < mp_.tcache_count
- && (tc_victim = *fb) != NULL)
- {
- if (SINGLE_THREAD_P)
- *fb = tc_victim->fd;
- else
- {
- REMOVE_FB (fb, pp, tc_victim);
- if (__glibc_unlikely (tc_victim == NULL))
- break;
- }
- tcache_put (tc_victim, tc_idx);
- }
- }
- #endif
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
- }
- }
- /*
- If a small request, check regular bin. Since these "smallbins"
- hold one size each, no searching within bins is necessary.
- (For a large request, we need to wait until unsorted chunks are
- processed to find best fit. But for small ones, fits are exact
- anyway, so we can check now, which is faster.)
- */
- if (in_smallbin_range (nb))
- {
- idx = smallbin_index (nb);
- bin = bin_at (av, idx);
- if ((victim = last (bin)) != bin)
- {
- bck = victim->bk;
- if (__glibc_unlikely (bck->fd != victim))
- malloc_printerr ("malloc(): smallbin double linked list corrupted");
- set_inuse_bit_at_offset (victim, nb);
- bin->bk = bck;
- bck->fd = bin;
- if (av != &main_arena)
- set_non_main_arena (victim);
- check_malloced_chunk (av, victim, nb);
- #if USE_TCACHE
- /* While we're here, if we see other chunks of the same size,
- stash them in the tcache. */
- size_t tc_idx = csize2tidx (nb);
- if (tcache && tc_idx < mp_.tcache_bins)
- {
- mchunkptr tc_victim;
- /* While bin not empty and tcache not full, copy chunks over. */
- while (tcache->counts[tc_idx] < mp_.tcache_count
- && (tc_victim = last (bin)) != bin)
- {
- if (tc_victim != 0)
- {
- bck = tc_victim->bk;
- set_inuse_bit_at_offset (tc_victim, nb);
- if (av != &main_arena)
- set_non_main_arena (tc_victim);
- bin->bk = bck;
- bck->fd = bin;
- tcache_put (tc_victim, tc_idx);
- }
- }
- }
- #endif
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
- }
- /*
- If this is a large request, consolidate fastbins before continuing.
- While it might look excessive to kill all fastbins before
- even seeing if there is space available, this avoids
- fragmentation problems normally associated with fastbins.
- Also, in practice, programs tend to have runs of either small or
- large requests, but less often mixtures, so consolidation is not
- invoked all that often in most programs. And the programs in which
- it is called frequently would otherwise tend to fragment.
- */
- else
- {
- idx = largebin_index (nb);
- if (atomic_load_relaxed (&av->have_fastchunks))
- malloc_consolidate (av);
- }
- /*
- Process recently freed or remaindered chunks, taking one only if
- it is an exact fit, or, if this is a small request, the chunk is the remainder from
- the most recent non-exact fit. Place other traversed chunks in
- bins. Note that this step is the only place in any routine where
- chunks are placed in bins.
- The outer loop here is needed because we might not realize until
- near the end of malloc that we should have consolidated, so must
- do so and retry. This happens at most once, and only when we would
- otherwise need to expand memory to service a "small" request.
- */
- #if USE_TCACHE
- INTERNAL_SIZE_T tcache_nb = 0;
- size_t tc_idx = csize2tidx (nb);
- if (tcache && tc_idx < mp_.tcache_bins)
- tcache_nb = nb;
- int return_cached = 0;
- tcache_unsorted_count = 0;
- #endif
- for (;; )
- {
- int iters = 0;
- while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
- {
- bck = victim->bk;
- size = chunksize (victim);
- mchunkptr next = chunk_at_offset (victim, size);
- if (__glibc_unlikely (size <= 2 * SIZE_SZ)
- || __glibc_unlikely (size > av->system_mem))
- malloc_printerr ("malloc(): invalid size (unsorted)");
- if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
- || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
- malloc_printerr ("malloc(): invalid next size (unsorted)");
- if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
- malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
- if (__glibc_unlikely (bck->fd != victim)
- || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
- malloc_printerr ("malloc(): unsorted double linked list corrupted");
- if (__glibc_unlikely (prev_inuse (next)))
- malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
- /*
- If a small request, try to use last remainder if it is the
- only chunk in unsorted bin. This helps promote locality for
- runs of consecutive small requests. This is the only
- exception to best-fit, and applies only when there is
- no exact fit for a small chunk.
- */
- if (in_smallbin_range (nb) &&
- bck == unsorted_chunks (av) &&
- victim == av->last_remainder &&
- (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
- {
- /* split and reattach remainder */
- remainder_size = size - nb;
- remainder = chunk_at_offset (victim, nb);
- unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
- av->last_remainder = remainder;
- remainder->bk = remainder->fd = unsorted_chunks (av);
- if (!in_smallbin_range (remainder_size))
- {
- remainder->fd_nextsize = NULL;
- remainder->bk_nextsize = NULL;
- }
- set_head (victim, nb | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head (remainder, remainder_size | PREV_INUSE);
- set_foot (remainder, remainder_size);
- check_malloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
- /* remove from unsorted list */
- if (__glibc_unlikely (bck->fd != victim))
- malloc_printerr ("malloc(): corrupted unsorted chunks 3");
- unsorted_chunks (av)->bk = bck;
- bck->fd = unsorted_chunks (av);
- /* Take now instead of binning if exact fit */
- if (size == nb)
- {
- set_inuse_bit_at_offset (victim, size);
- if (av != &main_arena)
- set_non_main_arena (victim);
- #if USE_TCACHE
- /* Fill cache first, return to user only if cache fills.
- We may return one of these chunks later. */
- if (tcache_nb
- && tcache->counts[tc_idx] < mp_.tcache_count)
- {
- tcache_put (victim, tc_idx);
- return_cached = 1;
- continue;
- }
- else
- {
- #endif
- check_malloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- #if USE_TCACHE
- }
- #endif
- }
- /* place chunk in bin */
- if (in_smallbin_range (size))
- {
- victim_index = smallbin_index (size);
- bck = bin_at (av, victim_index);
- fwd = bck->fd;
- }
- else
- {
- victim_index = largebin_index (size);
- bck = bin_at (av, victim_index);
- fwd = bck->fd;
- /* maintain large bins in sorted order */
- if (fwd != bck)
- {
- /* Or with inuse bit to speed comparisons */
- size |= PREV_INUSE;
- /* if smaller than smallest, bypass loop below */
- assert (chunk_main_arena (bck->bk));
- if ((unsigned long) (size)
- < (unsigned long) chunksize_nomask (bck->bk))
- {
- fwd = bck;
- bck = bck->bk;
- victim->fd_nextsize = fwd->fd;
- victim->bk_nextsize = fwd->fd->bk_nextsize;
- fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
- }
- else
- {
- assert (chunk_main_arena (fwd));
- while ((unsigned long) size < chunksize_nomask (fwd))
- {
- fwd = fwd->fd_nextsize;
- assert (chunk_main_arena (fwd));
- }
- if ((unsigned long) size
- == (unsigned long) chunksize_nomask (fwd))
- /* Always insert in the second position. */
- fwd = fwd->fd;
- else
- {
- victim->fd_nextsize = fwd;
- victim->bk_nextsize = fwd->bk_nextsize;
- fwd->bk_nextsize = victim;
- victim->bk_nextsize->fd_nextsize = victim;
- }
- bck = fwd->bk;
- }
- }
- else
- victim->fd_nextsize = victim->bk_nextsize = victim;
- }
- mark_bin (av, victim_index);
- victim->bk = bck;
- victim->fd = fwd;
- fwd->bk = victim;
- bck->fd = victim;
- #if USE_TCACHE
- /* If we've processed as many chunks as we're allowed while
- filling the cache, return one of the cached ones. */
- ++tcache_unsorted_count;
- if (return_cached
- && mp_.tcache_unsorted_limit > 0
- && tcache_unsorted_count > mp_.tcache_unsorted_limit)
- {
- return tcache_get (tc_idx);
- }
- #endif
- #define MAX_ITERS 10000
- if (++iters >= MAX_ITERS)
- break;
- }
- #if USE_TCACHE
- /* If all the small chunks we found ended up cached, return one now. */
- if (return_cached)
- {
- return tcache_get (tc_idx);
- }
- #endif
- /*
- If a large request, scan through the chunks of current bin in
- sorted order to find smallest that fits. Use the skip list for this.
- */
- if (!in_smallbin_range (nb))
- {
- bin = bin_at (av, idx);
- /* skip scan if empty or largest chunk is too small */
- if ((victim = first (bin)) != bin
- && (unsigned long) chunksize_nomask (victim)
- >= (unsigned long) (nb))
- {
- victim = victim->bk_nextsize;
- while (((unsigned long) (size = chunksize (victim)) <
- (unsigned long) (nb)))
- victim = victim->bk_nextsize;
- /* Avoid removing the first entry for a size so that the skip
- list does not have to be rerouted. */
- if (victim != last (bin)
- && chunksize_nomask (victim)
- == chunksize_nomask (victim->fd))
- victim = victim->fd;
- remainder_size = size - nb;
- unlink_chunk (av, victim);
- /* Exhaust */
- if (remainder_size < MINSIZE)
- {
- set_inuse_bit_at_offset (victim, size);
- if (av != &main_arena)
- set_non_main_arena (victim);
- }
- /* Split */
- else
- {
- remainder = chunk_at_offset (victim, nb);
- /* We cannot assume the unsorted list is empty and therefore
- have to perform a complete insert here. */
- bck = unsorted_chunks (av);
- fwd = bck->fd;
- if (__glibc_unlikely (fwd->bk != bck))
- malloc_printerr ("malloc(): corrupted unsorted chunks");
- remainder->bk = bck;
- remainder->fd = fwd;
- bck->fd = remainder;
- fwd->bk = remainder;
- if (!in_smallbin_range (remainder_size))
- {
- remainder->fd_nextsize = NULL;
- remainder->bk_nextsize = NULL;
- }
- set_head (victim, nb | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head (remainder, remainder_size | PREV_INUSE);
- set_foot (remainder, remainder_size);
- }
- check_malloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
- }
- /*
- Search for a chunk by scanning bins, starting with next largest
- bin. This search is strictly by best-fit; i.e., the smallest
- (with ties going to approximately the least recently used) chunk
- that fits is selected.
- The bitmap avoids needing to check that most blocks are nonempty.
- The particular case of skipping all bins during warm-up phases
- when no chunks have been returned yet is faster than it might look.
- */
- ++idx;
- bin = bin_at (av, idx);
- block = idx2block (idx);
- map = av->binmap[block];
- bit = idx2bit (idx);
- for (;; )
- {
- /* Skip rest of block if there are no more set bits in this block. */
- if (bit > map || bit == 0)
- {
- do
- {
- if (++block >= BINMAPSIZE) /* out of bins */
- goto use_top;
- }
- while ((map = av->binmap[block]) == 0);
- bin = bin_at (av, (block << BINMAPSHIFT));
- bit = 1;
- }
- /* Advance to bin with set bit. There must be one. */
- while ((bit & map) == 0)
- {
- bin = next_bin (bin);
- bit <<= 1;
- assert (bit != 0);
- }
- /* Inspect the bin. It is likely to be non-empty */
- victim = last (bin);
- /* If a false alarm (empty bin), clear the bit. */
- if (victim == bin)
- {
- av->binmap[block] = map &= ~bit; /* Write through */
- bin = next_bin (bin);
- bit <<= 1;
- }
- else
- {
- size = chunksize (victim);
- /* We know the first chunk in this bin is big enough to use. */
- assert ((unsigned long) (size) >= (unsigned long) (nb));
- remainder_size = size - nb;
- /* unlink */
- unlink_chunk (av, victim);
- /* Exhaust */
- if (remainder_size < MINSIZE)
- {
- set_inuse_bit_at_offset (victim, size);
- if (av != &main_arena)
- set_non_main_arena (victim);
- }
- /* Split */
- else
- {
- remainder = chunk_at_offset (victim, nb);
- /* We cannot assume the unsorted list is empty and therefore
- have to perform a complete insert here. */
- bck = unsorted_chunks (av);
- fwd = bck->fd;
- if (__glibc_unlikely (fwd->bk != bck))
- malloc_printerr ("malloc(): corrupted unsorted chunks 2");
- remainder->bk = bck;
- remainder->fd = fwd;
- bck->fd = remainder;
- fwd->bk = remainder;
- /* advertise as last remainder */
- if (in_smallbin_range (nb))
- av->last_remainder = remainder;
- if (!in_smallbin_range (remainder_size))
- {
- remainder->fd_nextsize = NULL;
- remainder->bk_nextsize = NULL;
- }
- set_head (victim, nb | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head (remainder, remainder_size | PREV_INUSE);
- set_foot (remainder, remainder_size);
- }
- check_malloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
- }
- use_top:
- /*
- If large enough, split off the chunk bordering the end of memory
- (held in av->top). Note that this is in accord with the best-fit
- search rule. In effect, av->top is treated as larger (and thus
- less well fitting) than any other available chunk since it can
- be extended to be as large as necessary (up to system
- limitations).
- We require that av->top always exists (i.e., has size >=
- MINSIZE) after initialization, so if it would otherwise be
- exhausted by current request, it is replenished. (The main
- reason for ensuring it exists is that we may need MINSIZE space
- to put in fenceposts in sysmalloc.)
- */
- victim = av->top;
- size = chunksize (victim);
- if (__glibc_unlikely (size > av->system_mem))
- malloc_printerr ("malloc(): corrupted top size");
- if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
- {
- remainder_size = size - nb;
- remainder = chunk_at_offset (victim, nb);
- av->top = remainder;
- set_head (victim, nb | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head (remainder, remainder_size | PREV_INUSE);
- check_malloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
- /* When we are using atomic ops to free fast chunks we can get
- here for all block sizes. */
- else if (atomic_load_relaxed (&av->have_fastchunks))
- {
- malloc_consolidate (av);
- /* restore original bin index */
- if (in_smallbin_range (nb))
- idx = smallbin_index (nb);
- else
- idx = largebin_index (nb);
- }
- /*
- Otherwise, relay to handle system-dependent cases
- */
- else
- {
- void *p = sysmalloc (nb, av);
- if (p != NULL)
- alloc_perturb (p, bytes);
- return p;
- }
- }
- }
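- /* Search order recap for _int_malloc: the matching fastbin, then the
-    exact small bin for small requests (large requests first consolidate
-    fastbins), then the unsorted list, binning whatever it passes over,
-    then the best fit in the matching large bin via the nextsize skip
-    list, then progressively larger bins via the binmap, and finally the
-    top chunk; if even top is too small, it either consolidates fastbins
-    and retries or calls sysmalloc to obtain more memory from the
-    system. */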
- /*
- ------------------------------ free ------------------------------
- */
- static void
- _int_free (mstate av, mchunkptr p, int have_lock)
- {
- INTERNAL_SIZE_T size; /* its size */
- mfastbinptr *fb; /* associated fastbin */
- mchunkptr nextchunk; /* next contiguous chunk */
- INTERNAL_SIZE_T nextsize; /* its size */
- int nextinuse; /* true if nextchunk is used */
- INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
- size = chunksize (p);
- /* Little security check which won't hurt performance: the
- allocator never wraps around at the end of the address space.
- Therefore we can exclude some size values which might appear
- here by accident or by "design" from some intruder. */
- if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
- || __builtin_expect (misaligned_chunk (p), 0))
- malloc_printerr ("free(): invalid pointer");
- /* We know that each chunk is at least MINSIZE bytes in size and a
- multiple of MALLOC_ALIGNMENT. */
- if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
- malloc_printerr ("free(): invalid size");
- check_inuse_chunk(av, p);
- #if USE_TCACHE
- {
- size_t tc_idx = csize2tidx (size);
- if (tcache != NULL && tc_idx < mp_.tcache_bins)
- {
- /* Check to see if it's already in the tcache. */
- tcache_entry *e = (tcache_entry *) chunk2mem (p);
- /* This test succeeds on double free. However, we don't 100%
- trust it (it also matches random payload data at a 1 in
- 2^<size_t> chance), so verify it's not an unlikely
- coincidence before aborting. */
- if (__glibc_unlikely (e->key == tcache))
- {
- tcache_entry *tmp;
- LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
- for (tmp = tcache->entries[tc_idx];
- tmp;
- tmp = tmp->next)
- if (tmp == e)
- malloc_printerr ("free(): double free detected in tcache 2");
- /* If we get here, it was a coincidence. We've wasted a
- few cycles, but don't abort. */
- }
- if (tcache->counts[tc_idx] < mp_.tcache_count)
- {
- tcache_put (p, tc_idx);
- return;
- }
- }
- }
- #endif
- /*
- If eligible, place chunk on a fastbin so it can be found
- and used quickly in malloc.
- */
- if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
- #if TRIM_FASTBINS
- /*
- If TRIM_FASTBINS set, don't place chunks
- bordering top into fastbins
- */
- && (chunk_at_offset(p, size) != av->top)
- #endif
- ) {
- if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
- <= 2 * SIZE_SZ, 0)
- || __builtin_expect (chunksize (chunk_at_offset (p, size))
- >= av->system_mem, 0))
- {
- bool fail = true;
- /* We might not have a lock at this point and concurrent modifications
- of system_mem might result in a false positive. Redo the test after
- getting the lock. */
- if (!have_lock)
- {
- __libc_lock_lock (av->mutex);
- fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
- || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
- __libc_lock_unlock (av->mutex);
- }
- if (fail)
- malloc_printerr ("free(): invalid next size (fast)");
- }
- free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
- atomic_store_relaxed (&av->have_fastchunks, true);
- unsigned int idx = fastbin_index(size);
- fb = &fastbin (av, idx);
- /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
- mchunkptr old = *fb, old2;
- if (SINGLE_THREAD_P)
- {
- /* Check that the top of the bin is not the record we are going to
- add (i.e., double free). */
- if (__builtin_expect (old == p, 0))
- malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old;
- *fb = p;
- }
- else
- do
- {
- /* Check that the top of the bin is not the record we are going to
- add (i.e., double free). */
- if (__builtin_expect (old == p, 0))
- malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old2 = old;
- }
- while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
- != old2);
- /* Check that size of fastbin chunk at the top is the same as
- size of the chunk that we are adding. We can dereference OLD
- only if we have the lock, otherwise it might have already been
- allocated again. */
- if (have_lock && old != NULL
- && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
- malloc_printerr ("invalid fastbin entry (free)");
- }
- /*
- Consolidate other non-mmapped chunks as they arrive.
- */
- else if (!chunk_is_mmapped(p)) {
- /* If we're single-threaded, don't lock the arena. */
- if (SINGLE_THREAD_P)
- have_lock = true;
- if (!have_lock)
- __libc_lock_lock (av->mutex);
- nextchunk = chunk_at_offset(p, size);
- /* Lightweight tests: check whether the block is already the
- top block. */
- if (__glibc_unlikely (p == av->top))
- malloc_printerr ("double free or corruption (top)");
- /* Or whether the next chunk is beyond the boundaries of the arena. */
- if (__builtin_expect (contiguous (av)
- && (char *) nextchunk
- >= ((char *) av->top + chunksize(av->top)), 0))
- malloc_printerr ("double free or corruption (out)");
- /* Or whether the block is actually not marked used. */
- if (__glibc_unlikely (!prev_inuse(nextchunk)))
- malloc_printerr ("double free or corruption (!prev)");
- nextsize = chunksize(nextchunk);
- if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
- malloc_printerr ("free(): invalid next size (normal)");
- free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = prev_size (p);
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- if (__glibc_unlikely (chunksize(p) != prevsize))
- malloc_printerr ("corrupted size vs. prev_size while consolidating");
- unlink_chunk (av, p);
- }
- if (nextchunk != av->top) {
- /* get and clear inuse bit */
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- /* consolidate forward */
- if (!nextinuse) {
- unlink_chunk (av, nextchunk);
- size += nextsize;
- } else
- clear_inuse_bit_at_offset(nextchunk, 0);
- /*
- Place the chunk in unsorted chunk list. Chunks are
- not placed into regular bins until after they have
- been given one chance to be used in malloc.
- */
- bck = unsorted_chunks(av);
- fwd = bck->fd;
- if (__glibc_unlikely (fwd->bk != bck))
- malloc_printerr ("free(): corrupted unsorted chunks");
- p->fd = fwd;
- p->bk = bck;
- if (!in_smallbin_range(size))
- {
- p->fd_nextsize = NULL;
- p->bk_nextsize = NULL;
- }
- bck->fd = p;
- fwd->bk = p;
- set_head(p, size | PREV_INUSE);
- set_foot(p, size);
- check_free_chunk(av, p);
- }
- /*
- If the chunk borders the current high end of memory,
- consolidate into top
- */
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- check_chunk(av, p);
- }
- /*
- If freeing a large space, consolidate possibly-surrounding
- chunks. Then, if the total unused topmost memory exceeds trim
- threshold, ask malloc_trim to reduce top.
- Unless max_fast is 0, we don't know if there are fastbins
- bordering top, so we cannot tell for sure whether threshold
- has been reached unless fastbins are consolidated. But we
- don't want to consolidate on each free. As a compromise,
- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
- is reached.
- */
- if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
- if (atomic_load_relaxed (&av->have_fastchunks))
- malloc_consolidate(av);
- if (av == &main_arena) {
- #ifndef MORECORE_CANNOT_TRIM
- if ((unsigned long)(chunksize(av->top)) >=
- (unsigned long)(mp_.trim_threshold))
- systrim(mp_.top_pad, av);
- #endif
- } else {
- /* Always try heap_trim(), even if the top chunk is not
- large, because the corresponding heap might go away. */
- heap_info *heap = heap_for_ptr(top(av));
- assert(heap->ar_ptr == av);
- heap_trim(heap, mp_.top_pad);
- }
- }
- if (!have_lock)
- __libc_lock_unlock (av->mutex);
- }
- /*
- If the chunk was allocated via mmap, release via munmap().
- */
- else {
- munmap_chunk (p);
- }
- }
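- /* Disposal recap for _int_free, once the tcache above has had first
-    refusal: chunks no larger than get_max_fast () (128 bytes by default
-    on 64-bit) are pushed onto a fastbin without coalescing; other
-    non-mmapped chunks are coalesced with free neighbours and parked in
-    the unsorted list, possibly triggering consolidation and a trim when
-    the freed size reaches FASTBIN_CONSOLIDATION_THRESHOLD; mmapped
-    chunks are simply unmapped. */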
- /*
- ------------------------- malloc_consolidate -------------------------
- malloc_consolidate is a specialized version of free() that tears
- down chunks held in fastbins. Free itself cannot be used for this
- purpose since, among other things, it might place chunks back onto
- fastbins. So, instead, we need to use a minor variant of the same
- code.
- */
- static void malloc_consolidate(mstate av)
- {
- mfastbinptr* fb; /* current fastbin being consolidated */
- mfastbinptr* maxfb; /* last fastbin (for loop control) */
- mchunkptr p; /* current chunk being consolidated */
- mchunkptr nextp; /* next chunk to consolidate */
- mchunkptr unsorted_bin; /* bin header */
- mchunkptr first_unsorted; /* chunk to link to */
- /* These have same use as in free() */
- mchunkptr nextchunk;
- INTERNAL_SIZE_T size;
- INTERNAL_SIZE_T nextsize;
- INTERNAL_SIZE_T prevsize;
- int nextinuse;
- atomic_store_relaxed (&av->have_fastchunks, false);
- unsorted_bin = unsorted_chunks(av);
- /*
- Remove each chunk from fast bin and consolidate it, placing it
- then in unsorted bin. Among other reasons for doing this,
- placing in unsorted bin avoids needing to calculate actual bins
- until malloc is sure that chunks aren't immediately going to be
- reused anyway.
- */
- maxfb = &fastbin (av, NFASTBINS - 1);
- fb = &fastbin (av, 0);
- do {
- p = atomic_exchange_acq (fb, NULL);
- if (p != 0) {
- do {
- {
- unsigned int idx = fastbin_index (chunksize (p));
- if ((&fastbin (av, idx)) != fb)
- malloc_printerr ("malloc_consolidate(): invalid chunk size");
- }
- check_inuse_chunk(av, p);
- nextp = p->fd;
- /* Slightly streamlined version of consolidation code in free() */
- size = chunksize (p);
- nextchunk = chunk_at_offset(p, size);
- nextsize = chunksize(nextchunk);
- if (!prev_inuse(p)) {
- prevsize = prev_size (p);
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- if (__glibc_unlikely (chunksize(p) != prevsize))
- malloc_printerr ("corrupted size vs. prev_size in fastbins");
- unlink_chunk (av, p);
- }
- if (nextchunk != av->top) {
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- if (!nextinuse) {
- size += nextsize;
- unlink_chunk (av, nextchunk);
- } else
- clear_inuse_bit_at_offset(nextchunk, 0);
- first_unsorted = unsorted_bin->fd;
- unsorted_bin->fd = p;
- first_unsorted->bk = p;
- if (!in_smallbin_range (size)) {
- p->fd_nextsize = NULL;
- p->bk_nextsize = NULL;
- }
- set_head(p, size | PREV_INUSE);
- p->bk = unsorted_bin;
- p->fd = first_unsorted;
- set_foot(p, size);
- }
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- }
- } while ( (p = nextp) != 0);
- }
- } while (fb++ != maxfb);
- }
- /*
- ------------------------------ realloc ------------------------------
- */
- void*
- _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
- INTERNAL_SIZE_T nb)
- {
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- void* newmem; /* corresponding user mem */
- mchunkptr next; /* next contiguous chunk after oldp */
- mchunkptr remainder; /* extra space at end of newp */
- unsigned long remainder_size; /* its size */
- /* oldmem size */
- if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
- || __builtin_expect (oldsize >= av->system_mem, 0))
- malloc_printerr ("realloc(): invalid old size");
- check_inuse_chunk (av, oldp);
- /* All callers already filter out mmap'ed chunks. */
- assert (!chunk_is_mmapped (oldp));
- next = chunk_at_offset (oldp, oldsize);
- INTERNAL_SIZE_T nextsize = chunksize (next);
- if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
- malloc_printerr ("realloc(): invalid next size");
- if ((unsigned long) (oldsize) >= (unsigned long) (nb))
- {
- /* already big enough; split below */
- newp = oldp;
- newsize = oldsize;
- }
- else
- {
- /* Try to expand forward into top */
- if (next == av->top &&
- (unsigned long) (newsize = oldsize + nextsize) >=
- (unsigned long) (nb + MINSIZE))
- {
- set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
- av->top = chunk_at_offset (oldp, nb);
- set_head (av->top, (newsize - nb) | PREV_INUSE);
- check_inuse_chunk (av, oldp);
- return chunk2mem (oldp);
- }
- /* Try to expand forward into next chunk; split off remainder below */
- else if (next != av->top &&
- !inuse (next) &&
- (unsigned long) (newsize = oldsize + nextsize) >=
- (unsigned long) (nb))
- {
- newp = oldp;
- unlink_chunk (av, next);
- }
- /* allocate, copy, free */
- else
- {
- newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
- if (newmem == 0)
- return 0; /* propagate failure */
- newp = mem2chunk (newmem);
- newsize = chunksize (newp);
- /*
- Avoid copy if newp is next chunk after oldp.
- */
- if (newp == next)
- {
- newsize += oldsize;
- newp = oldp;
- }
- else
- {
- memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
- _int_free (av, oldp, 1);
- check_inuse_chunk (av, newp);
- return chunk2mem (newp);
- }
- }
- }
- /* If possible, free extra space in old or extended chunk */
- assert ((unsigned long) (newsize) >= (unsigned long) (nb));
- remainder_size = newsize - nb;
- if (remainder_size < MINSIZE) /* not enough extra to split off */
- {
- set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_inuse_bit_at_offset (newp, newsize);
- }
- else /* split remainder */
- {
- remainder = chunk_at_offset (newp, nb);
- set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head (remainder, remainder_size | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- /* Mark remainder as inuse so free() won't complain */
- set_inuse_bit_at_offset (remainder, remainder_size);
- _int_free (av, remainder, 1);
- }
- check_inuse_chunk (av, newp);
- return chunk2mem (newp);
- }
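- /* Growth strategy of _int_realloc, in order: reuse the old chunk if it
-    is already big enough, grow in place into the top chunk, absorb a
-    free next chunk, and only then allocate-copy-free; whichever path
-    wins, any excess of at least MINSIZE is split off and freed back. */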
- /*
- ------------------------------ memalign ------------------------------
- */
- static void *
- _int_memalign (mstate av, size_t alignment, size_t bytes)
- {
- INTERNAL_SIZE_T nb; /* padded request size */
- char *m; /* memory returned by malloc call */
- mchunkptr p; /* corresponding chunk */
- char *brk; /* alignment point within p */
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
- mchunkptr remainder; /* spare room at end to split off */
- unsigned long remainder_size; /* its size */
- INTERNAL_SIZE_T size;
- checked_request2size (bytes, nb);
- /*
- Strategy: find a spot within that chunk that meets the alignment
- request, and then possibly free the leading and trailing space.
- */
- /* Check for overflow. */
- if (nb > SIZE_MAX - alignment - MINSIZE)
- {
- __set_errno (ENOMEM);
- return 0;
- }
- /* Call malloc with worst case padding to hit alignment. */
- m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
- if (m == 0)
- return 0; /* propagate failure */
- p = mem2chunk (m);
- if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
- { /*
- Find an aligned spot inside chunk. Since we need to give back
- leading space in a chunk of at least MINSIZE, if the first
- calculation places us at a spot with less than MINSIZE leader,
- we can move to the next aligned spot -- we've allocated enough
- total room so that this is always possible.
- */
- brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
- - ((signed long) alignment));
- if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
- brk += alignment;
- newp = (mchunkptr) brk;
- leadsize = brk - (char *) (p);
- newsize = chunksize (p) - leadsize;
- /* For mmapped chunks, just adjust offset */
- if (chunk_is_mmapped (p))
- {
- set_prev_size (newp, prev_size (p) + leadsize);
- set_head (newp, newsize | IS_MMAPPED);
- return chunk2mem (newp);
- }
- /* Otherwise, give back leader, use the rest */
- set_head (newp, newsize | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_inuse_bit_at_offset (newp, newsize);
- set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
- _int_free (av, p, 1);
- p = newp;
- assert (newsize >= nb &&
- (((unsigned long) (chunk2mem (p))) % alignment) == 0);
- }
- /* Also give back spare room at the end */
- if (!chunk_is_mmapped (p))
- {
- size = chunksize (p);
- if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
- {
- remainder_size = size - nb;
- remainder = chunk_at_offset (p, nb);
- set_head (remainder, remainder_size | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head_size (p, nb);
- _int_free (av, remainder, 1);
- }
- }
- check_inuse_chunk (av, p);
- return chunk2mem (p);
- }
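- /* Worst-case padding example: for a 4096-byte alignment the code above
-    requests nb + 4096 + MINSIZE bytes, which guarantees an aligned spot
-    whose leading space, when present, is at least MINSIZE and can be
-    split off and freed as an ordinary chunk; trailing excess beyond
-    nb + MINSIZE is returned the same way. */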
- /*
- ------------------------------ malloc_trim ------------------------------
- */
- static int
- mtrim (mstate av, size_t pad)
- {
- /* Ensure all blocks are consolidated. */
- malloc_consolidate (av);
- const size_t ps = GLRO (dl_pagesize);
- int psindex = bin_index (ps);
- const size_t psm1 = ps - 1;
- int result = 0;
- for (int i = 1; i < NBINS; ++i)
- if (i == 1 || i >= psindex)
- {
- mbinptr bin = bin_at (av, i);
- for (mchunkptr p = last (bin); p != bin; p = p->bk)
- {
- INTERNAL_SIZE_T size = chunksize (p);
- if (size > psm1 + sizeof (struct malloc_chunk))
- {
- /* See whether the chunk contains at least one unused page. */
- char *paligned_mem = (char *) (((uintptr_t) p
- + sizeof (struct malloc_chunk)
- + psm1) & ~psm1);
- assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
- assert ((char *) p + size > paligned_mem);
- /* This is the size we could potentially free. */
- size -= paligned_mem - (char *) p;
- if (size > psm1)
- {
- #if MALLOC_DEBUG
- /* When debugging we simulate destroying the memory
- content. */
- memset (paligned_mem, 0x89, size & ~psm1);
- #endif
- __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
- result = 1;
- }
- }
- }
- }
- #ifndef MORECORE_CANNOT_TRIM
- return result | (av == &main_arena ? systrim (pad, av) : 0);
- #else
- return result;
- #endif
- }
- int
- __malloc_trim (size_t s)
- {
- int result = 0;
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- mstate ar_ptr = &main_arena;
- do
- {
- __libc_lock_lock (ar_ptr->mutex);
- result |= mtrim (ar_ptr, s);
- __libc_lock_unlock (ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- }
- while (ar_ptr != &main_arena);
- return result;
- }
- /*
- ------------------------- malloc_usable_size -------------------------
- */
- static size_t
- musable (void *mem)
- {
- mchunkptr p;
- if (mem != 0)
- {
- p = mem2chunk (mem);
- if (__builtin_expect (using_malloc_checking == 1, 0))
- return malloc_check_get_size (p);
- if (chunk_is_mmapped (p))
- {
- if (DUMPED_MAIN_ARENA_CHUNK (p))
- return chunksize (p) - SIZE_SZ;
- else
- return chunksize (p) - 2 * SIZE_SZ;
- }
- else if (inuse (p))
- return chunksize (p) - SIZE_SZ;
- }
- return 0;
- }
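- /* The usable size is chunksize minus the header overhead: one SIZE_SZ
-    word for an in-use heap chunk (the next chunk's prev_size field may
-    be used as data), two for a regular mmapped chunk, and one for the
-    faked mmapped chunks kept from a dumped main arena, as computed
-    above. */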
- size_t
- __malloc_usable_size (void *m)
- {
- size_t result;
- result = musable (m);
- return result;
- }
- /*
- ------------------------------ mallinfo ------------------------------
- Accumulate malloc statistics for arena AV into M.
- */
- static void
- int_mallinfo (mstate av, struct mallinfo *m)
- {
- size_t i;
- mbinptr b;
- mchunkptr p;
- INTERNAL_SIZE_T avail;
- INTERNAL_SIZE_T fastavail;
- int nblocks;
- int nfastblocks;
- check_malloc_state (av);
- /* Account for top */
- avail = chunksize (av->top);
- nblocks = 1; /* top always exists */
- /* traverse fastbins */
- nfastblocks = 0;
- fastavail = 0;
- for (i = 0; i < NFASTBINS; ++i)
- {
- for (p = fastbin (av, i); p != 0; p = p->fd)
- {
- ++nfastblocks;
- fastavail += chunksize (p);
- }
- }
- avail += fastavail;
- /* traverse regular bins */
- for (i = 1; i < NBINS; ++i)
- {
- b = bin_at (av, i);
- for (p = last (b); p != b; p = p->bk)
- {
- ++nblocks;
- avail += chunksize (p);
- }
- }
- m->smblks += nfastblocks;
- m->ordblks += nblocks;
- m->fordblks += avail;
- m->uordblks += av->system_mem - avail;
- m->arena += av->system_mem;
- m->fsmblks += fastavail;
- if (av == &main_arena)
- {
- m->hblks = mp_.n_mmaps;
- m->hblkhd = mp_.mmapped_mem;
- m->usmblks = 0;
- m->keepcost = chunksize (av->top);
- }
- }
- struct mallinfo
- __libc_mallinfo (void)
- {
- struct mallinfo m;
- mstate ar_ptr;
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- memset (&m, 0, sizeof (m));
- ar_ptr = &main_arena;
- do
- {
- __libc_lock_lock (ar_ptr->mutex);
- int_mallinfo (ar_ptr, &m);
- __libc_lock_unlock (ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- }
- while (ar_ptr != &main_arena);
- return m;
- }
- /*
- ------------------------------ malloc_stats ------------------------------
- */
- void
- __malloc_stats (void)
- {
- int i;
- mstate ar_ptr;
- unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- _IO_flockfile (stderr);
- int old_flags2 = stderr->_flags2;
- stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
- for (i = 0, ar_ptr = &main_arena;; i++)
- {
- struct mallinfo mi;
- memset (&mi, 0, sizeof (mi));
- __libc_lock_lock (ar_ptr->mutex);
- int_mallinfo (ar_ptr, &mi);
- fprintf (stderr, "Arena %d:\n", i);
- fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
- fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
- #if MALLOC_DEBUG > 1
- if (i > 0)
- dump_heap (heap_for_ptr (top (ar_ptr)));
- #endif
- system_b += mi.arena;
- in_use_b += mi.uordblks;
- __libc_lock_unlock (ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if (ar_ptr == &main_arena)
- break;
- }
- fprintf (stderr, "Total (incl. mmap):\n");
- fprintf (stderr, "system bytes = %10u\n", system_b);
- fprintf (stderr, "in use bytes = %10u\n", in_use_b);
- fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
- fprintf (stderr, "max mmap bytes = %10lu\n",
- (unsigned long) mp_.max_mmapped_mem);
- stderr->_flags2 = old_flags2;
- _IO_funlockfile (stderr);
- }
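- /* Usage sketch (editor's illustration, not part of the original file):
-    malloc_stats prints the per-arena and total figures above directly to
-    stderr, so it needs no FILE argument:
-
-      #include <malloc.h>
-
-      void dump_allocator_state (void)
-      {
-        malloc_stats ();   // writes "Arena N:" blocks followed by totals
-      }
- */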
- /*
- ------------------------------ mallopt ------------------------------
- */
- static inline int
- __always_inline
- do_set_trim_threshold (size_t value)
- {
- LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
- mp_.no_dyn_threshold);
- mp_.trim_threshold = value;
- mp_.no_dyn_threshold = 1;
- return 1;
- }
- static inline int
- __always_inline
- do_set_top_pad (size_t value)
- {
- LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
- mp_.no_dyn_threshold);
- mp_.top_pad = value;
- mp_.no_dyn_threshold = 1;
- return 1;
- }
- static inline int
- __always_inline
- do_set_mmap_threshold (size_t value)
- {
- /* Forbid setting the threshold too high. */
- if (value <= HEAP_MAX_SIZE / 2)
- {
- LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
- mp_.no_dyn_threshold);
- mp_.mmap_threshold = value;
- mp_.no_dyn_threshold = 1;
- return 1;
- }
- return 0;
- }
- static inline int
- __always_inline
- do_set_mmaps_max (int32_t value)
- {
- LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
- mp_.no_dyn_threshold);
- mp_.n_mmaps_max = value;
- mp_.no_dyn_threshold = 1;
- return 1;
- }
- static inline int
- __always_inline
- do_set_mallopt_check (int32_t value)
- {
- return 1;
- }
- static inline int
- __always_inline
- do_set_perturb_byte (int32_t value)
- {
- LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
- perturb_byte = value;
- return 1;
- }
- static inline int
- __always_inline
- do_set_arena_test (size_t value)
- {
- LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
- mp_.arena_test = value;
- return 1;
- }
- static inline int
- __always_inline
- do_set_arena_max (size_t value)
- {
- LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
- mp_.arena_max = value;
- return 1;
- }
- #if USE_TCACHE
- static inline int
- __always_inline
- do_set_tcache_max (size_t value)
- {
- if (value <= MAX_TCACHE_SIZE)
- {
- LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
- mp_.tcache_max_bytes = value;
- mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
- }
- return 1;
- }
- static inline int
- __always_inline
- do_set_tcache_count (size_t value)
- {
- LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
- mp_.tcache_count = value;
- return 1;
- }
- static inline int
- __always_inline
- do_set_tcache_unsorted_limit (size_t value)
- {
- LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
- mp_.tcache_unsorted_limit = value;
- return 1;
- }
- #endif
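- /* Editor's note (illustration, not part of the original file): the
-    do_set_tcache_* helpers above are not reachable through mallopt; in
-    glibc they are driven by the GLIBC_TUNABLES environment variable.  A
-    shell-level sketch (tunable name as documented for glibc, stated here
-    as an assumption):
-
-      GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./my_program
-
-    Setting tcache_count to 0 effectively disables the per-thread cache,
-    which can be useful when hunting heap corruption.  */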
- int
- __libc_mallopt (int param_number, int value)
- {
- mstate av = &main_arena;
- int res = 1;
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- __libc_lock_lock (av->mutex);
- LIBC_PROBE (memory_mallopt, 2, param_number, value);
- /* We must consolidate main arena before changing max_fast
- (see definition of set_max_fast). */
- malloc_consolidate (av);
- switch (param_number)
- {
- case M_MXFAST:
- if (value >= 0 && value <= MAX_FAST_SIZE)
- {
- LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
- set_max_fast (value);
- }
- else
- res = 0;
- break;
- case M_TRIM_THRESHOLD:
- do_set_trim_threshold (value);
- break;
- case M_TOP_PAD:
- do_set_top_pad (value);
- break;
- case M_MMAP_THRESHOLD:
- res = do_set_mmap_threshold (value);
- break;
- case M_MMAP_MAX:
- do_set_mmaps_max (value);
- break;
- case M_CHECK_ACTION:
- do_set_mallopt_check (value);
- break;
- case M_PERTURB:
- do_set_perturb_byte (value);
- break;
- case M_ARENA_TEST:
- if (value > 0)
- do_set_arena_test (value);
- break;
- case M_ARENA_MAX:
- if (value > 0)
- do_set_arena_max (value);
- break;
- }
- __libc_lock_unlock (av->mutex);
- return res;
- }
- libc_hidden_def (__libc_mallopt)
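- /* Usage sketch (editor's illustration, not part of the original file):
-    mallopt takes one of the M_* parameter codes and an int value, and
-    returns nonzero on success:
-
-      #include <malloc.h>
-
-      void tune_allocator (void)
-      {
-        // Serve requests of 1 MiB and larger directly via mmap.
-        mallopt (M_MMAP_THRESHOLD, 1024 * 1024);
-        // Disable fastbins entirely (0 is a valid M_MXFAST value).
-        mallopt (M_MXFAST, 0);
-      }
-
-    As the switch above shows, most parameters accept any value and always
-    report success; only M_MXFAST and M_MMAP_THRESHOLD can return 0.  */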
- /*
- -------------------- Alternative MORECORE functions --------------------
- */
- /*
- General Requirements for MORECORE.
- The MORECORE function must have the following properties:
- If MORECORE_CONTIGUOUS is false:
- * MORECORE must allocate in multiples of pagesize. It will
- only be called with arguments that are multiples of pagesize.
- * MORECORE(0) must return an address that is at least
- MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
- else (i.e. If MORECORE_CONTIGUOUS is true):
- * Consecutive calls to MORECORE with positive arguments
- return increasing addresses, indicating that space has been
- contiguously extended.
- * MORECORE need not allocate in multiples of pagesize.
- Calls to MORECORE need not have args of multiples of pagesize.
- * MORECORE need not page-align.
- In either case:
- * MORECORE may allocate more memory than requested. (Or even less,
- but this will generally result in a malloc failure.)
- * MORECORE must not allocate memory when given argument zero, but
- instead return one past the end address of memory from previous
- nonzero call. This malloc does NOT call MORECORE(0)
- until at least one call with positive arguments is made, so
- the initial value returned is not important.
- * Even though consecutive calls to MORECORE need not return contiguous
- addresses, it must be OK for malloc'ed chunks to span multiple
- regions in those cases where they do happen to be contiguous.
- * MORECORE need not handle negative arguments -- it may instead
- just return MORECORE_FAILURE when given negative arguments.
- Negative arguments are always multiples of pagesize. MORECORE
- must not misinterpret negative args as large positive unsigned
- args. You can suppress all such calls from even occurring by defining
- MORECORE_CANNOT_TRIM.
- There is some variation across systems about the type of the
- argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
- actually be size_t, because sbrk supports negative args, so it is
- normally the signed type of the same width as size_t (sometimes
- declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
- matter though. Internally, we use "long" as arguments, which should
- work across all reasonable possibilities.
- Additionally, if MORECORE ever returns failure for a positive
- request, then mmap is used as a noncontiguous system allocator. This
- is a useful backup strategy for systems with holes in address spaces
- -- in this case sbrk cannot contiguously expand the heap, but mmap
- may be able to map noncontiguous space.
- If you'd like mmap to ALWAYS be used, you can define MORECORE to be
- a function that always returns MORECORE_FAILURE.
- If you are using this malloc with something other than sbrk (or its
- emulation) to supply memory regions, you probably want to set
- MORECORE_CONTIGUOUS as false. As an example, here is a custom
- allocator kindly contributed for pre-OSX macOS. It uses virtually
- but not necessarily physically contiguous non-paged memory (locked
- in, present and won't get swapped out). You can use it by
- uncommenting this section, adding some #includes, and setting up the
- appropriate defines above:
- *#define MORECORE osMoreCore
- *#define MORECORE_CONTIGUOUS 0
- There is also a shutdown routine that should somehow be called for
- cleanup upon program exit.
- *#define MAX_POOL_ENTRIES 100
- *#define MINIMUM_MORECORE_SIZE (64 * 1024)
- static int next_os_pool;
- void *our_os_pools[MAX_POOL_ENTRIES];
- void *osMoreCore(int size)
- {
- void *ptr = 0;
- static void *sbrk_top = 0;
- if (size > 0)
- {
- if (size < MINIMUM_MORECORE_SIZE)
- size = MINIMUM_MORECORE_SIZE;
- if (CurrentExecutionLevel() == kTaskLevel)
- ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
- if (ptr == 0)
- {
- return (void *) MORECORE_FAILURE;
- }
- // save ptrs so they can be freed during cleanup
- our_os_pools[next_os_pool] = ptr;
- next_os_pool++;
- ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
- sbrk_top = (char *) ptr + size;
- return ptr;
- }
- else if (size < 0)
- {
- // we don't currently support shrink behavior
- return (void *) MORECORE_FAILURE;
- }
- else
- {
- return sbrk_top;
- }
- }
- // cleanup any allocated memory pools
- // called as last thing before shutting down driver
- void osCleanupMem(void)
- {
- void **ptr;
- for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
- if (*ptr)
- {
- PoolDeallocate(*ptr);
- *ptr = 0;
- }
- }
- */
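- /* Editor's note (illustration, not part of the original file): as the
-    comment above mentions, mmap-only operation can be forced by giving
-    MORECORE a definition that always fails.  A minimal sketch, assuming
-    the build overrides the default sbrk-based MORECORE:
-
-      #include <stddef.h>
-
-      static void *
-      fail_morecore (ptrdiff_t increment)
-      {
-        return (void *) MORECORE_FAILURE;   // never extend via sbrk
-      }
-
-      #define MORECORE fail_morecore
-      #define MORECORE_CONTIGUOUS 0
-
-    Every system request then falls back to the noncontiguous mmap path.  */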
- /* Helper code. */
- extern char **__libc_argv attribute_hidden;
- static void
- malloc_printerr (const char *str)
- {
- __libc_message (do_abort, "%s\n", str);
- __builtin_unreachable ();
- }
- /* We need a wrapper function for one of the additions of POSIX. */
- int
- __posix_memalign (void **memptr, size_t alignment, size_t size)
- {
- void *mem;
- /* Test whether the ALIGNMENT argument is valid. It must be a power of
- two multiple of sizeof (void *). */
- if (alignment % sizeof (void *) != 0
- || !powerof2 (alignment / sizeof (void *))
- || alignment == 0)
- return EINVAL;
- void *address = RETURN_ADDRESS (0);
- mem = _mid_memalign (alignment, size, address);
- if (mem != NULL)
- {
- *memptr = mem;
- return 0;
- }
- return ENOMEM;
- }
- weak_alias (__posix_memalign, posix_memalign)
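- /* Usage sketch (editor's illustration, not part of the original file):
-    posix_memalign reports failure through its return value rather than a
-    null result, as the wrapper above shows:
-
-      #include <stdlib.h>
-
-      void *alloc_page_aligned (size_t n)
-      {
-        void *p = NULL;
-        // 4096 is a power of two and a multiple of sizeof (void *).
-        int err = posix_memalign (&p, 4096, n);
-        return err == 0 ? p : NULL;   // err is EINVAL or ENOMEM on failure
-      }
- */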
- int
- __malloc_info (int options, FILE *fp)
- {
- /* For now, at least. */
- if (options != 0)
- return EINVAL;
- int n = 0;
- size_t total_nblocks = 0;
- size_t total_nfastblocks = 0;
- size_t total_avail = 0;
- size_t total_fastavail = 0;
- size_t total_system = 0;
- size_t total_max_system = 0;
- size_t total_aspace = 0;
- size_t total_aspace_mprotect = 0;
- if (__malloc_initialized < 0)
- ptmalloc_init ();
- fputs ("<malloc version=\"1\">\n", fp);
- /* Iterate over all arenas currently in use. */
- mstate ar_ptr = &main_arena;
- do
- {
- fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
- size_t nblocks = 0;
- size_t nfastblocks = 0;
- size_t avail = 0;
- size_t fastavail = 0;
- struct
- {
- size_t from;
- size_t to;
- size_t total;
- size_t count;
- } sizes[NFASTBINS + NBINS - 1];
- #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
- __libc_lock_lock (ar_ptr->mutex);
- for (size_t i = 0; i < NFASTBINS; ++i)
- {
- mchunkptr p = fastbin (ar_ptr, i);
- if (p != NULL)
- {
- size_t nthissize = 0;
- size_t thissize = chunksize (p);
- while (p != NULL)
- {
- ++nthissize;
- p = p->fd;
- }
- fastavail += nthissize * thissize;
- nfastblocks += nthissize;
- sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
- sizes[i].to = thissize;
- sizes[i].count = nthissize;
- }
- else
- sizes[i].from = sizes[i].to = sizes[i].count = 0;
- sizes[i].total = sizes[i].count * sizes[i].to;
- }
- mbinptr bin;
- struct malloc_chunk *r;
- for (size_t i = 1; i < NBINS; ++i)
- {
- bin = bin_at (ar_ptr, i);
- r = bin->fd;
- sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
- sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
- = sizes[NFASTBINS - 1 + i].count = 0;
- if (r != NULL)
- while (r != bin)
- {
- size_t r_size = chunksize_nomask (r);
- ++sizes[NFASTBINS - 1 + i].count;
- sizes[NFASTBINS - 1 + i].total += r_size;
- sizes[NFASTBINS - 1 + i].from
- = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
- sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
- r_size);
- r = r->fd;
- }
- if (sizes[NFASTBINS - 1 + i].count == 0)
- sizes[NFASTBINS - 1 + i].from = 0;
- nblocks += sizes[NFASTBINS - 1 + i].count;
- avail += sizes[NFASTBINS - 1 + i].total;
- }
- size_t heap_size = 0;
- size_t heap_mprotect_size = 0;
- size_t heap_count = 0;
- if (ar_ptr != &main_arena)
- {
- /* Iterate over the arena heaps from back to front. */
- heap_info *heap = heap_for_ptr (top (ar_ptr));
- do
- {
- heap_size += heap->size;
- heap_mprotect_size += heap->mprotect_size;
- heap = heap->prev;
- ++heap_count;
- }
- while (heap != NULL);
- }
- __libc_lock_unlock (ar_ptr->mutex);
- total_nfastblocks += nfastblocks;
- total_fastavail += fastavail;
- total_nblocks += nblocks;
- total_avail += avail;
- for (size_t i = 0; i < nsizes; ++i)
- if (sizes[i].count != 0 && i != NFASTBINS)
- fprintf (fp, " \
- <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
- sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
- if (sizes[NFASTBINS].count != 0)
- fprintf (fp, "\
- <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
- sizes[NFASTBINS].from, sizes[NFASTBINS].to,
- sizes[NFASTBINS].total, sizes[NFASTBINS].count);
- total_system += ar_ptr->system_mem;
- total_max_system += ar_ptr->max_system_mem;
- fprintf (fp,
- "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
- "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
- "<system type=\"current\" size=\"%zu\"/>\n"
- "<system type=\"max\" size=\"%zu\"/>\n",
- nfastblocks, fastavail, nblocks, avail,
- ar_ptr->system_mem, ar_ptr->max_system_mem);
- if (ar_ptr != &main_arena)
- {
- fprintf (fp,
- "<aspace type=\"total\" size=\"%zu\"/>\n"
- "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
- "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
- heap_size, heap_mprotect_size, heap_count);
- total_aspace += heap_size;
- total_aspace_mprotect += heap_mprotect_size;
- }
- else
- {
- fprintf (fp,
- "<aspace type=\"total\" size=\"%zu\"/>\n"
- "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
- ar_ptr->system_mem, ar_ptr->system_mem);
- total_aspace += ar_ptr->system_mem;
- total_aspace_mprotect += ar_ptr->system_mem;
- }
- fputs ("</heap>\n", fp);
- ar_ptr = ar_ptr->next;
- }
- while (ar_ptr != &main_arena);
- fprintf (fp,
- "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
- "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
- "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
- "<system type=\"current\" size=\"%zu\"/>\n"
- "<system type=\"max\" size=\"%zu\"/>\n"
- "<aspace type=\"total\" size=\"%zu\"/>\n"
- "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
- "</malloc>\n",
- total_nfastblocks, total_fastavail, total_nblocks, total_avail,
- mp_.n_mmaps, mp_.mmapped_mem,
- total_system, total_max_system,
- total_aspace, total_aspace_mprotect);
- return 0;
- }
- weak_alias (__malloc_info, malloc_info)
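- /* Usage sketch (editor's illustration, not part of the original file):
-    malloc_info writes an XML snapshot of every arena to the given stdio
-    stream; options must currently be 0:
-
-      #include <malloc.h>
-      #include <stdio.h>
-
-      void dump_malloc_xml (void)
-      {
-        if (malloc_info (0, stdout) != 0)
-          fputs ("malloc_info failed\n", stderr);
-      }
-
-    The output begins with "<malloc version=\"1\">" and contains one <heap>
-    element per arena, as emitted above.  */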
- strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
- strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
- strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
- strong_alias (__libc_memalign, __memalign)
- weak_alias (__libc_memalign, memalign)
- strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
- strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
- strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
- strong_alias (__libc_mallinfo, __mallinfo)
- weak_alias (__libc_mallinfo, mallinfo)
- strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
- weak_alias (__malloc_stats, malloc_stats)
- weak_alias (__malloc_usable_size, malloc_usable_size)
- weak_alias (__malloc_trim, malloc_trim)
- #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
- compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
- #endif
- /* ------------------------------------------------------------
- History:
- [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
- */
- /*
- * Local variables:
- * c-basic-offset: 2
- * End:
- */