- #include <linux/export.h>
- #include <linux/kernel.h>
- #include <linux/sched.h>
- #include <linux/init.h>
- #include <linux/signal.h>
- #include <linux/completion.h>
- #include <linux/workqueue.h>
- #include <linux/slab.h>
- #include <linux/cpu.h>
- #include <linux/notifier.h>
- #include <linux/kthread.h>
- #include <linux/hardirq.h>
- #include <linux/mempolicy.h>
- #include <linux/freezer.h>
- #include <linux/kallsyms.h>
- #include <linux/debug_locks.h>
- #include <linux/lockdep.h>
- #include <linux/idr.h>
- #include <linux/jhash.h>
- #include <linux/hashtable.h>
- #include <linux/rculist.h>
- #include <linux/nodemask.h>
- #include <linux/moduleparam.h>
- #include <linux/uaccess.h>
- #include "workqueue_internal.h"
- enum {
-
- POOL_DISASSOCIATED = 1 << 2,
-
- WORKER_DIE = 1 << 1,
- WORKER_IDLE = 1 << 2,
- WORKER_PREP = 1 << 3,
- WORKER_CPU_INTENSIVE = 1 << 6,
- WORKER_UNBOUND = 1 << 7,
- WORKER_REBOUND = 1 << 8,
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
- WORKER_UNBOUND | WORKER_REBOUND,
- NR_STD_WORKER_POOLS = 2,
- UNBOUND_POOL_HASH_ORDER = 6,
- BUSY_WORKER_HASH_ORDER = 6,
- MAX_IDLE_WORKERS_RATIO = 4,
- IDLE_WORKER_TIMEOUT = 300 * HZ,
- MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
-
- MAYDAY_INTERVAL = HZ / 10,
- CREATE_COOLDOWN = HZ,
-
- RESCUER_NICE_LEVEL = MIN_NICE,
- HIGHPRI_NICE_LEVEL = MIN_NICE,
- WQ_NAME_LEN = 24,
- };
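- /*
-  * worker_pool: shared state for one pool of workers (per-CPU or unbound).
-  * Holds the pending worklist, idle/busy worker bookkeeping, the manager
-  * arbitration mutex, the pool attributes and the nr_running counter used
-  * for concurrency management.
-  */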
- struct worker_pool {
- spinlock_t lock;
- int cpu;
- int node;
- int id;
- unsigned int flags;
- unsigned long watchdog_ts;
- struct list_head worklist;
- int nr_workers;
-
- int nr_idle;
- struct list_head idle_list;
- struct timer_list idle_timer;
- struct timer_list mayday_timer;
-
- DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
-
-
- struct mutex manager_arb;
- struct worker *manager;
- struct mutex attach_mutex;
- struct list_head workers;
- struct completion *detach_completion;
- struct ida worker_ida;
- struct workqueue_attrs *attrs;
- struct hlist_node hash_node;
- int refcnt;
-
- atomic_t nr_running ____cacheline_aligned_in_smp;
-
- struct rcu_head rcu;
- } ____cacheline_aligned_in_smp;
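- /*
-  * pool_workqueue: the link between a workqueue and one worker_pool.
-  * Tracks per-color in-flight counts for flushing, nr_active against
-  * max_active, and delayed_works for items queued past max_active.
-  */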
- struct pool_workqueue {
- struct worker_pool *pool;
- struct workqueue_struct *wq;
- int work_color;
- int flush_color;
- int refcnt;
- int nr_in_flight[WORK_NR_COLORS];
-
- int nr_active;
- int max_active;
- struct list_head delayed_works;
- struct list_head pwqs_node;
- struct list_head mayday_node;
-
- struct work_struct unbound_release_work;
- struct rcu_head rcu;
- } __aligned(1 << WORK_STRUCT_FLAG_BITS);
- struct wq_flusher {
- struct list_head list;
- int flush_color;
- struct completion done;
- };
- struct wq_device;
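- /*
-  * workqueue_struct: the externally visible workqueue.  Owns its
-  * pool_workqueues, the color-based flush machinery, an optional rescuer
-  * and the per-CPU / per-node pwq tables at the end of the struct.
-  */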
- struct workqueue_struct {
- struct list_head pwqs;
- struct list_head list;
- struct mutex mutex;
- int work_color;
- int flush_color;
- atomic_t nr_pwqs_to_flush;
- struct wq_flusher *first_flusher;
- struct list_head flusher_queue;
- struct list_head flusher_overflow;
- struct list_head maydays;
- struct worker *rescuer;
- int nr_drainers;
- int saved_max_active;
- struct workqueue_attrs *unbound_attrs;
- struct pool_workqueue *dfl_pwq;
- #ifdef CONFIG_SYSFS
- struct wq_device *wq_dev;
- #endif
- #ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
- #endif
- char name[WQ_NAME_LEN];
-
- struct rcu_head rcu;
-
- unsigned int flags ____cacheline_aligned;
- struct pool_workqueue __percpu *cpu_pwqs;
- struct pool_workqueue __rcu *numa_pwq_tbl[];
- };
- static struct kmem_cache *pwq_cache;
- static cpumask_var_t *wq_numa_possible_cpumask;
-
- static bool wq_disable_numa;
- module_param_named(disable_numa, wq_disable_numa, bool, 0444);
- static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
- module_param_named(power_efficient, wq_power_efficient, bool, 0444);
- static bool wq_numa_enabled;
- static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
- static DEFINE_MUTEX(wq_pool_mutex);
- static DEFINE_SPINLOCK(wq_mayday_lock);
- static LIST_HEAD(workqueues);
- static bool workqueue_freezing;
- static cpumask_var_t wq_unbound_cpumask;
- static DEFINE_PER_CPU(int, wq_rr_cpu_last);
- #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
- static bool wq_debug_force_rr_cpu = true;
- #else
- static bool wq_debug_force_rr_cpu = false;
- #endif
- module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
- static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
- static DEFINE_IDR(worker_pool_idr);
- static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
- static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
- static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
- struct workqueue_struct *system_wq __read_mostly;
- EXPORT_SYMBOL(system_wq);
- struct workqueue_struct *system_highpri_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_highpri_wq);
- struct workqueue_struct *system_long_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_long_wq);
- struct workqueue_struct *system_unbound_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_unbound_wq);
- struct workqueue_struct *system_freezable_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_freezable_wq);
- struct workqueue_struct *system_power_efficient_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_power_efficient_wq);
- struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
- static int worker_thread(void *__worker);
- static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
- #define CREATE_TRACE_POINTS
- #include <trace/events/workqueue.h>
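- /*
-  * The assert_rcu_* macros below check that the caller holds either sched
-  * RCU or the relevant mutex before walking pool/pwq lists; the for_each_*
-  * iterators use them to document their locking requirements.
-  */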
- #define assert_rcu_or_pool_mutex() \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- !lockdep_is_held(&wq_pool_mutex), \
- "sched RCU or wq_pool_mutex should be held")
- #define assert_rcu_or_wq_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- !lockdep_is_held(&wq->mutex), \
- "sched RCU or wq->mutex should be held")
- #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- !lockdep_is_held(&wq->mutex) && \
- !lockdep_is_held(&wq_pool_mutex), \
- "sched RCU, wq->mutex or wq_pool_mutex should be held")
- #define for_each_cpu_worker_pool(pool, cpu) \
- for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
- (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
- (pool)++)
- #define for_each_pool(pool, pi) \
- idr_for_each_entry(&worker_pool_idr, pool, pi) \
- if (({ assert_rcu_or_pool_mutex(); false; })) { } \
- else
- #define for_each_pool_worker(worker, pool) \
- list_for_each_entry((worker), &(pool)->workers, node) \
- if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
- else
- #define for_each_pwq(pwq, wq) \
- list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
- if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
- else
- #ifdef CONFIG_DEBUG_OBJECTS_WORK
- static struct debug_obj_descr work_debug_descr;
- static void *work_debug_hint(void *addr)
- {
- return ((struct work_struct *) addr)->func;
- }
- static bool work_is_static_object(void *addr)
- {
- struct work_struct *work = addr;
- return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
- }
- static bool work_fixup_init(void *addr, enum debug_obj_state state)
- {
- struct work_struct *work = addr;
- switch (state) {
- case ODEBUG_STATE_ACTIVE:
- cancel_work_sync(work);
- debug_object_init(work, &work_debug_descr);
- return true;
- default:
- return false;
- }
- }
- static bool work_fixup_free(void *addr, enum debug_obj_state state)
- {
- struct work_struct *work = addr;
- switch (state) {
- case ODEBUG_STATE_ACTIVE:
- cancel_work_sync(work);
- debug_object_free(work, &work_debug_descr);
- return true;
- default:
- return false;
- }
- }
- static struct debug_obj_descr work_debug_descr = {
- .name = "work_struct",
- .debug_hint = work_debug_hint,
- .is_static_object = work_is_static_object,
- .fixup_init = work_fixup_init,
- .fixup_free = work_fixup_free,
- };
- static inline void debug_work_activate(struct work_struct *work)
- {
- debug_object_activate(work, &work_debug_descr);
- }
- static inline void debug_work_deactivate(struct work_struct *work)
- {
- debug_object_deactivate(work, &work_debug_descr);
- }
- void __init_work(struct work_struct *work, int onstack)
- {
- if (onstack)
- debug_object_init_on_stack(work, &work_debug_descr);
- else
- debug_object_init(work, &work_debug_descr);
- }
- EXPORT_SYMBOL_GPL(__init_work);
- void destroy_work_on_stack(struct work_struct *work)
- {
- debug_object_free(work, &work_debug_descr);
- }
- EXPORT_SYMBOL_GPL(destroy_work_on_stack);
- void destroy_delayed_work_on_stack(struct delayed_work *work)
- {
- destroy_timer_on_stack(&work->timer);
- debug_object_free(&work->work, &work_debug_descr);
- }
- EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
- #else
- static inline void debug_work_activate(struct work_struct *work) { }
- static inline void debug_work_deactivate(struct work_struct *work) { }
- #endif
- static int worker_pool_assign_id(struct worker_pool *pool)
- {
- int ret;
- lockdep_assert_held(&wq_pool_mutex);
- ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
- GFP_KERNEL);
- if (ret >= 0) {
- pool->id = ret;
- return 0;
- }
- return ret;
- }
- static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
- int node)
- {
- assert_rcu_or_wq_mutex_or_pool_mutex(wq);
-
- if (unlikely(node == NUMA_NO_NODE))
- return wq->dfl_pwq;
- return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
- }
- static unsigned int work_color_to_flags(int color)
- {
- return color << WORK_STRUCT_COLOR_SHIFT;
- }
- static int get_work_color(struct work_struct *work)
- {
- return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
- ((1 << WORK_STRUCT_COLOR_BITS) - 1);
- }
- static int work_next_color(int color)
- {
- return (color + 1) % WORK_NR_COLORS;
- }
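- /*
-  * work->data encodes either a pool_workqueue pointer (WORK_STRUCT_PWQ set,
-  * while the item is queued or running) or the last pool ID shifted by
-  * WORK_OFFQ_POOL_SHIFT, together with the WORK_STRUCT_* flag bits.  The
-  * helpers below pack and unpack that word.
-  */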
- static inline void set_work_data(struct work_struct *work, unsigned long data,
- unsigned long flags)
- {
- WARN_ON_ONCE(!work_pending(work));
- atomic_long_set(&work->data, data | flags | work_static(work));
- }
- static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
- unsigned long extra_flags)
- {
- set_work_data(work, (unsigned long)pwq,
- WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
- }
- static void set_work_pool_and_keep_pending(struct work_struct *work,
- int pool_id)
- {
- set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
- WORK_STRUCT_PENDING);
- }
- static void set_work_pool_and_clear_pending(struct work_struct *work,
- int pool_id)
- {
-
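- /*
-  * The wmb pairs with the barrier implied by test_and_set_bit(PENDING) so
-  * that updates to @work made here are visible to and precede any updates
-  * by the next PENDING owner.
-  */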
- smp_wmb();
- set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
-
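- /*
-  * Full barrier: prevent the PENDING clear above from being reordered with
-  * loads and stores performed by the work function that runs afterwards.
-  */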
- smp_mb();
- }
- static void clear_work_data(struct work_struct *work)
- {
- smp_wmb();
- set_work_data(work, WORK_STRUCT_NO_POOL, 0);
- }
- static struct pool_workqueue *get_work_pwq(struct work_struct *work)
- {
- unsigned long data = atomic_long_read(&work->data);
- if (data & WORK_STRUCT_PWQ)
- return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
- else
- return NULL;
- }
- static struct worker_pool *get_work_pool(struct work_struct *work)
- {
- unsigned long data = atomic_long_read(&work->data);
- int pool_id;
- assert_rcu_or_pool_mutex();
- if (data & WORK_STRUCT_PWQ)
- return ((struct pool_workqueue *)
- (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
- pool_id = data >> WORK_OFFQ_POOL_SHIFT;
- if (pool_id == WORK_OFFQ_POOL_NONE)
- return NULL;
- return idr_find(&worker_pool_idr, pool_id);
- }
- static int get_work_pool_id(struct work_struct *work)
- {
- unsigned long data = atomic_long_read(&work->data);
- if (data & WORK_STRUCT_PWQ)
- return ((struct pool_workqueue *)
- (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
- return data >> WORK_OFFQ_POOL_SHIFT;
- }
- static void mark_work_canceling(struct work_struct *work)
- {
- unsigned long pool_id = get_work_pool_id(work);
- pool_id <<= WORK_OFFQ_POOL_SHIFT;
- set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
- }
- static bool work_is_canceling(struct work_struct *work)
- {
- unsigned long data = atomic_long_read(&work->data);
- return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
- }
- static bool __need_more_worker(struct worker_pool *pool)
- {
- return !atomic_read(&pool->nr_running);
- }
- static bool need_more_worker(struct worker_pool *pool)
- {
- return !list_empty(&pool->worklist) && __need_more_worker(pool);
- }
- static bool may_start_working(struct worker_pool *pool)
- {
- return pool->nr_idle;
- }
- static bool keep_working(struct worker_pool *pool)
- {
- return !list_empty(&pool->worklist) &&
- atomic_read(&pool->nr_running) <= 1;
- }
- static bool need_to_create_worker(struct worker_pool *pool)
- {
- return need_more_worker(pool) && !may_start_working(pool);
- }
- static bool too_many_workers(struct worker_pool *pool)
- {
- bool managing = mutex_is_locked(&pool->manager_arb);
- int nr_idle = pool->nr_idle + managing;
- int nr_busy = pool->nr_workers - nr_idle;
- return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
- }
- static struct worker *first_idle_worker(struct worker_pool *pool)
- {
- if (unlikely(list_empty(&pool->idle_list)))
- return NULL;
- return list_first_entry(&pool->idle_list, struct worker, entry);
- }
- static void wake_up_worker(struct worker_pool *pool)
- {
- struct worker *worker = first_idle_worker(pool);
- if (likely(worker))
- wake_up_process(worker->task);
- }
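- /*
-  * Scheduler hooks: wq_worker_waking_up() and wq_worker_sleeping() keep
-  * pool->nr_running in sync as workers block and wake, and may pick another
-  * idle worker to wake so the pool keeps executing work.
-  */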
- void wq_worker_waking_up(struct task_struct *task, int cpu)
- {
- struct worker *worker = kthread_data(task);
- if (!(worker->flags & WORKER_NOT_RUNNING)) {
- WARN_ON_ONCE(worker->pool->cpu != cpu);
- atomic_inc(&worker->pool->nr_running);
- }
- }
- struct task_struct *wq_worker_sleeping(struct task_struct *task)
- {
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
- struct worker_pool *pool;
-
- if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
- pool = worker->pool;
-
- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
- return NULL;
-
- if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist))
- to_wakeup = first_idle_worker(pool);
- return to_wakeup ? to_wakeup->task : NULL;
- }
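- /*
-  * worker_set_flags()/worker_clr_flags() must be called from the worker's
-  * own task; they adjust pool->nr_running whenever the worker enters or
-  * leaves the WORKER_NOT_RUNNING state.
-  */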
- static inline void worker_set_flags(struct worker *worker, unsigned int flags)
- {
- struct worker_pool *pool = worker->pool;
- WARN_ON_ONCE(worker->task != current);
-
- if ((flags & WORKER_NOT_RUNNING) &&
- !(worker->flags & WORKER_NOT_RUNNING)) {
- atomic_dec(&pool->nr_running);
- }
- worker->flags |= flags;
- }
- static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
- {
- struct worker_pool *pool = worker->pool;
- unsigned int oflags = worker->flags;
- WARN_ON_ONCE(worker->task != current);
- worker->flags &= ~flags;
-
- if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
- if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(&pool->nr_running);
- }
- static struct worker *find_worker_executing_work(struct worker_pool *pool,
- struct work_struct *work)
- {
- struct worker *worker;
- hash_for_each_possible(pool->busy_hash, worker, hentry,
- (unsigned long)work)
- if (worker->current_work == work &&
- worker->current_func == work->func)
- return worker;
- return NULL;
- }
- static void move_linked_works(struct work_struct *work, struct list_head *head,
- struct work_struct **nextp)
- {
- struct work_struct *n;
-
- list_for_each_entry_safe_from(work, n, NULL, entry) {
- list_move_tail(&work->entry, head);
- if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
- break;
- }
-
- if (nextp)
- *nextp = n;
- }
- static void get_pwq(struct pool_workqueue *pwq)
- {
- lockdep_assert_held(&pwq->pool->lock);
- WARN_ON_ONCE(pwq->refcnt <= 0);
- pwq->refcnt++;
- }
- static void put_pwq(struct pool_workqueue *pwq)
- {
- lockdep_assert_held(&pwq->pool->lock);
- if (likely(--pwq->refcnt))
- return;
- if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
- return;
-
- schedule_work(&pwq->unbound_release_work);
- }
- static void put_pwq_unlocked(struct pool_workqueue *pwq)
- {
- if (pwq) {
-
- spin_lock_irq(&pwq->pool->lock);
- put_pwq(pwq);
- spin_unlock_irq(&pwq->pool->lock);
- }
- }
- static void pwq_activate_delayed_work(struct work_struct *work)
- {
- struct pool_workqueue *pwq = get_work_pwq(work);
- trace_workqueue_activate_work(work);
- if (list_empty(&pwq->pool->worklist))
- pwq->pool->watchdog_ts = jiffies;
- move_linked_works(work, &pwq->pool->worklist, NULL);
- __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
- pwq->nr_active++;
- }
- static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
- {
- struct work_struct *work = list_first_entry(&pwq->delayed_works,
- struct work_struct, entry);
- pwq_activate_delayed_work(work);
- }
- static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
- {
-
- if (color == WORK_NO_COLOR)
- goto out_put;
- pwq->nr_in_flight[color]--;
- pwq->nr_active--;
- if (!list_empty(&pwq->delayed_works)) {
-
- if (pwq->nr_active < pwq->max_active)
- pwq_activate_first_delayed(pwq);
- }
-
- if (likely(pwq->flush_color != color))
- goto out_put;
-
- if (pwq->nr_in_flight[color])
- goto out_put;
-
- pwq->flush_color = -1;
-
- if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
- complete(&pwq->wq->first_flusher->done);
- out_put:
- put_pwq(pwq);
- }
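- /*
-  * try_to_grab_pending(): steal the PENDING bit and dequeue @work if it is
-  * queued.  Returns 1 if pending was stolen from a pending work, 0 if the
-  * work was idle and PENDING was claimed here, -EAGAIN if the caller should
-  * retry, and -ENOENT if the work is being canceled.  On return >= 0, local
-  * irqs stay disabled with the previous state saved in *flags.
-  */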
- static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
- unsigned long *flags)
- {
- struct worker_pool *pool;
- struct pool_workqueue *pwq;
- local_irq_save(*flags);
-
- if (is_dwork) {
- struct delayed_work *dwork = to_delayed_work(work);
-
- if (likely(del_timer(&dwork->timer)))
- return 1;
- }
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
- return 0;
-
- pool = get_work_pool(work);
- if (!pool)
- goto fail;
- spin_lock(&pool->lock);
-
- pwq = get_work_pwq(work);
- if (pwq && pwq->pool == pool) {
- debug_work_deactivate(work);
-
- if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
- pwq_activate_delayed_work(work);
- list_del_init(&work->entry);
- pwq_dec_nr_in_flight(pwq, get_work_color(work));
-
- set_work_pool_and_keep_pending(work, pool->id);
- spin_unlock(&pool->lock);
- return 1;
- }
- spin_unlock(&pool->lock);
- fail:
- local_irq_restore(*flags);
- if (work_is_canceling(work))
- return -ENOENT;
- cpu_relax();
- return -EAGAIN;
- }
- static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
- struct list_head *head, unsigned int extra_flags)
- {
- struct worker_pool *pool = pwq->pool;
-
- set_work_pwq(work, pwq, extra_flags);
- list_add_tail(&work->entry, head);
- get_pwq(pwq);
-
- smp_mb();
- if (__need_more_worker(pool))
- wake_up_worker(pool);
- }
- static bool is_chained_work(struct workqueue_struct *wq)
- {
- struct worker *worker;
- worker = current_wq_worker();
-
- return worker && worker->current_pwq->wq == wq;
- }
- static int wq_select_unbound_cpu(int cpu)
- {
- static bool printed_dbg_warning;
- int new_cpu;
- if (likely(!wq_debug_force_rr_cpu)) {
- if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
- return cpu;
- } else if (!printed_dbg_warning) {
- pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
- printed_dbg_warning = true;
- }
- if (cpumask_empty(wq_unbound_cpumask))
- return cpu;
- new_cpu = __this_cpu_read(wq_rr_cpu_last);
- new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
- if (unlikely(new_cpu >= nr_cpu_ids)) {
- new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
- if (unlikely(new_cpu >= nr_cpu_ids))
- return cpu;
- }
- __this_cpu_write(wq_rr_cpu_last, new_cpu);
- return new_cpu;
- }
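- /*
-  * __queue_work() expects irqs to be disabled by the caller.  To keep work
-  * items non-reentrant, if @work is still running on a different pool of
-  * the same workqueue it is queued on that pool's pwq instead of the
-  * locally selected one.
-  */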
- static void __queue_work(int cpu, struct workqueue_struct *wq,
- struct work_struct *work)
- {
- struct pool_workqueue *pwq;
- struct worker_pool *last_pool;
- struct list_head *worklist;
- unsigned int work_flags;
- unsigned int req_cpu = cpu;
-
- WARN_ON_ONCE(!irqs_disabled());
- debug_work_activate(work);
-
- if (unlikely(wq->flags & __WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq)))
- return;
- retry:
- if (req_cpu == WORK_CPU_UNBOUND)
- cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
- if (!(wq->flags & WQ_UNBOUND))
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
- else
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
-
- last_pool = get_work_pool(work);
- if (last_pool && last_pool != pwq->pool) {
- struct worker *worker;
- spin_lock(&last_pool->lock);
- worker = find_worker_executing_work(last_pool, work);
- if (worker && worker->current_pwq->wq == wq) {
- pwq = worker->current_pwq;
- } else {
-
- spin_unlock(&last_pool->lock);
- spin_lock(&pwq->pool->lock);
- }
- } else {
- spin_lock(&pwq->pool->lock);
- }
-
- if (unlikely(!pwq->refcnt)) {
- if (wq->flags & WQ_UNBOUND) {
- spin_unlock(&pwq->pool->lock);
- cpu_relax();
- goto retry;
- }
-
- WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
- wq->name, cpu);
- }
-
- trace_workqueue_queue_work(req_cpu, pwq, work);
- if (WARN_ON(!list_empty(&work->entry))) {
- spin_unlock(&pwq->pool->lock);
- return;
- }
- pwq->nr_in_flight[pwq->work_color]++;
- work_flags = work_color_to_flags(pwq->work_color);
- if (likely(pwq->nr_active < pwq->max_active)) {
- trace_workqueue_activate_work(work);
- pwq->nr_active++;
- worklist = &pwq->pool->worklist;
- if (list_empty(worklist))
- pwq->pool->watchdog_ts = jiffies;
- } else {
- work_flags |= WORK_STRUCT_DELAYED;
- worklist = &pwq->delayed_works;
- }
- insert_work(pwq, work, worklist, work_flags);
- spin_unlock(&pwq->pool->lock);
- }
- bool queue_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work)
- {
- bool ret = false;
- unsigned long flags;
- local_irq_save(flags);
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_work(cpu, wq, work);
- ret = true;
- }
- local_irq_restore(flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_work_on);
- void delayed_work_timer_fn(unsigned long __data)
- {
- struct delayed_work *dwork = (struct delayed_work *)__data;
-
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
- }
- EXPORT_SYMBOL(delayed_work_timer_fn);
- static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
- struct delayed_work *dwork, unsigned long delay)
- {
- struct timer_list *timer = &dwork->timer;
- struct work_struct *work = &dwork->work;
- WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
- timer->data != (unsigned long)dwork);
- WARN_ON_ONCE(timer_pending(timer));
- WARN_ON_ONCE(!list_empty(&work->entry));
-
- if (!delay) {
- __queue_work(cpu, wq, &dwork->work);
- return;
- }
- timer_stats_timer_set_start_info(&dwork->timer);
- dwork->wq = wq;
- dwork->cpu = cpu;
- timer->expires = jiffies + delay;
- if (unlikely(cpu != WORK_CPU_UNBOUND))
- add_timer_on(timer, cpu);
- else
- add_timer(timer);
- }
- bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct delayed_work *dwork, unsigned long delay)
- {
- struct work_struct *work = &dwork->work;
- bool ret = false;
- unsigned long flags;
-
- local_irq_save(flags);
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_delayed_work(cpu, wq, dwork, delay);
- ret = true;
- }
- local_irq_restore(flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_delayed_work_on);
- bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct delayed_work *dwork, unsigned long delay)
- {
- unsigned long flags;
- int ret;
- do {
- ret = try_to_grab_pending(&dwork->work, true, &flags);
- } while (unlikely(ret == -EAGAIN));
- if (likely(ret >= 0)) {
- __queue_delayed_work(cpu, wq, dwork, delay);
- local_irq_restore(flags);
- }
-
- return ret;
- }
- EXPORT_SYMBOL_GPL(mod_delayed_work_on);
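- /*
-  * worker_enter_idle() is called with pool->lock held: it marks the worker
-  * idle, puts it at the head of pool->idle_list and arms the idle reaper
-  * timer when the pool has too many idle workers.
-  */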
- static void worker_enter_idle(struct worker *worker)
- {
- struct worker_pool *pool = worker->pool;
- if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
- WARN_ON_ONCE(!list_empty(&worker->entry) &&
- (worker->hentry.next || worker->hentry.pprev)))
- return;
-
- worker->flags |= WORKER_IDLE;
- pool->nr_idle++;
- worker->last_active = jiffies;
-
- list_add(&worker->entry, &pool->idle_list);
- if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
- mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-
- WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
- pool->nr_workers == pool->nr_idle &&
- atomic_read(&pool->nr_running));
- }
- static void worker_leave_idle(struct worker *worker)
- {
- struct worker_pool *pool = worker->pool;
- if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
- return;
- worker_clr_flags(worker, WORKER_IDLE);
- pool->nr_idle--;
- list_del_init(&worker->entry);
- }
- static struct worker *alloc_worker(int node)
- {
- struct worker *worker;
- worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
- if (worker) {
- INIT_LIST_HEAD(&worker->entry);
- INIT_LIST_HEAD(&worker->scheduled);
- INIT_LIST_HEAD(&worker->node);
-
- worker->flags = WORKER_PREP;
- }
- return worker;
- }
- static void worker_attach_to_pool(struct worker *worker,
- struct worker_pool *pool)
- {
- mutex_lock(&pool->attach_mutex);
-
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
- if (pool->flags & POOL_DISASSOCIATED)
- worker->flags |= WORKER_UNBOUND;
- list_add_tail(&worker->node, &pool->workers);
- mutex_unlock(&pool->attach_mutex);
- }
- static void worker_detach_from_pool(struct worker *worker,
- struct worker_pool *pool)
- {
- struct completion *detach_completion = NULL;
- mutex_lock(&pool->attach_mutex);
- list_del(&worker->node);
- if (list_empty(&pool->workers))
- detach_completion = pool->detach_completion;
- mutex_unlock(&pool->attach_mutex);
-
- worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
- if (detach_completion)
- complete(detach_completion);
- }
- static struct worker *create_worker(struct worker_pool *pool)
- {
- struct worker *worker = NULL;
- int id = -1;
- char id_buf[16];
-
- id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
- goto fail;
- worker = alloc_worker(pool->node);
- if (!worker)
- goto fail;
- worker->pool = pool;
- worker->id = id;
- if (pool->cpu >= 0)
- snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
- pool->attrs->nice < 0 ? "H" : "");
- else
- snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
- worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
- "kworker/%s", id_buf);
- if (IS_ERR(worker->task))
- goto fail;
- set_user_nice(worker->task, pool->attrs->nice);
- kthread_bind_mask(worker->task, pool->attrs->cpumask);
-
- worker_attach_to_pool(worker, pool);
-
- spin_lock_irq(&pool->lock);
- worker->pool->nr_workers++;
- worker_enter_idle(worker);
- wake_up_process(worker->task);
- spin_unlock_irq(&pool->lock);
- return worker;
- fail:
- if (id >= 0)
- ida_simple_remove(&pool->worker_ida, id);
- kfree(worker);
- return NULL;
- }
- static void destroy_worker(struct worker *worker)
- {
- struct worker_pool *pool = worker->pool;
- lockdep_assert_held(&pool->lock);
-
- if (WARN_ON(worker->current_work) ||
- WARN_ON(!list_empty(&worker->scheduled)) ||
- WARN_ON(!(worker->flags & WORKER_IDLE)))
- return;
- pool->nr_workers--;
- pool->nr_idle--;
- list_del_init(&worker->entry);
- worker->flags |= WORKER_DIE;
- wake_up_process(worker->task);
- }
- static void idle_worker_timeout(unsigned long __pool)
- {
- struct worker_pool *pool = (void *)__pool;
- spin_lock_irq(&pool->lock);
- while (too_many_workers(pool)) {
- struct worker *worker;
- unsigned long expires;
-
- worker = list_entry(pool->idle_list.prev, struct worker, entry);
- expires = worker->last_active + IDLE_WORKER_TIMEOUT;
- if (time_before(jiffies, expires)) {
- mod_timer(&pool->idle_timer, expires);
- break;
- }
- destroy_worker(worker);
- }
- spin_unlock_irq(&pool->lock);
- }
- static void send_mayday(struct work_struct *work)
- {
- struct pool_workqueue *pwq = get_work_pwq(work);
- struct workqueue_struct *wq = pwq->wq;
- lockdep_assert_held(&wq_mayday_lock);
- if (!wq->rescuer)
- return;
-
- if (list_empty(&pwq->mayday_node)) {
-
- get_pwq(pwq);
- list_add_tail(&pwq->mayday_node, &wq->maydays);
- wake_up_process(wq->rescuer->task);
- }
- }
- static void pool_mayday_timeout(unsigned long __pool)
- {
- struct worker_pool *pool = (void *)__pool;
- struct work_struct *work;
- spin_lock_irq(&pool->lock);
- spin_lock(&wq_mayday_lock);
- if (need_to_create_worker(pool)) {
-
- list_for_each_entry(work, &pool->worklist, entry)
- send_mayday(work);
- }
- spin_unlock(&wq_mayday_lock);
- spin_unlock_irq(&pool->lock);
- mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
- }
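- /*
-  * maybe_create_worker() runs with pool->lock held on entry and exit but
-  * drops it while creating workers; the mayday timer is armed so the
-  * rescuer can be summoned if worker creation keeps failing.
-  */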
- static void maybe_create_worker(struct worker_pool *pool)
- __releases(&pool->lock)
- __acquires(&pool->lock)
- {
- restart:
- spin_unlock_irq(&pool->lock);
-
- mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
- while (true) {
- if (create_worker(pool) || !need_to_create_worker(pool))
- break;
- schedule_timeout_interruptible(CREATE_COOLDOWN);
- if (!need_to_create_worker(pool))
- break;
- }
- del_timer_sync(&pool->mayday_timer);
- spin_lock_irq(&pool->lock);
-
- if (need_to_create_worker(pool))
- goto restart;
- }
- static bool manage_workers(struct worker *worker)
- {
- struct worker_pool *pool = worker->pool;
-
- if (!mutex_trylock(&pool->manager_arb))
- return false;
- pool->manager = worker;
- maybe_create_worker(pool);
- pool->manager = NULL;
- mutex_unlock(&pool->manager_arb);
- return true;
- }
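- /*
-  * process_one_work() executes a single work item.  pool->lock is dropped
-  * for the duration of the callback and reacquired afterwards; CPU
-  * intensive work is excluded from concurrency management while it runs,
-  * and a callback that leaks a lock or atomic context is reported.
-  */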
- static void process_one_work(struct worker *worker, struct work_struct *work)
- __releases(&pool->lock)
- __acquires(&pool->lock)
- {
- struct pool_workqueue *pwq = get_work_pwq(work);
- struct worker_pool *pool = worker->pool;
- bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
- int work_color;
- struct worker *collision;
- #ifdef CONFIG_LOCKDEP
-
- struct lockdep_map lockdep_map;
- lockdep_copy_map(&lockdep_map, &work->lockdep_map);
- #endif
-
- WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
- raw_smp_processor_id() != pool->cpu);
-
- collision = find_worker_executing_work(pool, work);
- if (unlikely(collision)) {
- move_linked_works(work, &collision->scheduled, NULL);
- return;
- }
-
- debug_work_deactivate(work);
- hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
- worker->current_work = work;
- worker->current_func = work->func;
- worker->current_pwq = pwq;
- work_color = get_work_color(work);
- list_del_init(&work->entry);
-
- if (unlikely(cpu_intensive))
- worker_set_flags(worker, WORKER_CPU_INTENSIVE);
-
- if (need_more_worker(pool))
- wake_up_worker(pool);
-
- set_work_pool_and_clear_pending(work, pool->id);
- spin_unlock_irq(&pool->lock);
- lock_map_acquire_read(&pwq->wq->lockdep_map);
- lock_map_acquire(&lockdep_map);
- trace_workqueue_execute_start(work);
- worker->current_func(work);
-
- trace_workqueue_execute_end(work);
- lock_map_release(&lockdep_map);
- lock_map_release(&pwq->wq->lockdep_map);
- if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
- pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
- " last function: %pf\n",
- current->comm, preempt_count(), task_pid_nr(current),
- worker->current_func);
- debug_show_held_locks(current);
- dump_stack();
- }
-
- cond_resched_rcu_qs();
- spin_lock_irq(&pool->lock);
-
- if (unlikely(cpu_intensive))
- worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
-
- hash_del(&worker->hentry);
- worker->current_work = NULL;
- worker->current_func = NULL;
- worker->current_pwq = NULL;
- worker->desc_valid = false;
- pwq_dec_nr_in_flight(pwq, work_color);
- }
- static void process_scheduled_works(struct worker *worker)
- {
- while (!list_empty(&worker->scheduled)) {
- struct work_struct *work = list_first_entry(&worker->scheduled,
- struct work_struct, entry);
- process_one_work(worker, work);
- }
- }
- static int worker_thread(void *__worker)
- {
- struct worker *worker = __worker;
- struct worker_pool *pool = worker->pool;
-
- worker->task->flags |= PF_WQ_WORKER;
- woke_up:
- spin_lock_irq(&pool->lock);
-
- if (unlikely(worker->flags & WORKER_DIE)) {
- spin_unlock_irq(&pool->lock);
- WARN_ON_ONCE(!list_empty(&worker->entry));
- worker->task->flags &= ~PF_WQ_WORKER;
- set_task_comm(worker->task, "kworker/dying");
- ida_simple_remove(&pool->worker_ida, worker->id);
- worker_detach_from_pool(worker, pool);
- kfree(worker);
- return 0;
- }
- worker_leave_idle(worker);
- recheck:
-
- if (!need_more_worker(pool))
- goto sleep;
-
- if (unlikely(!may_start_working(pool)) && manage_workers(worker))
- goto recheck;
-
- WARN_ON_ONCE(!list_empty(&worker->scheduled));
-
- worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
- do {
- struct work_struct *work =
- list_first_entry(&pool->worklist,
- struct work_struct, entry);
- pool->watchdog_ts = jiffies;
- if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
-
- process_one_work(worker, work);
- if (unlikely(!list_empty(&worker->scheduled)))
- process_scheduled_works(worker);
- } else {
- move_linked_works(work, &worker->scheduled, NULL);
- process_scheduled_works(worker);
- }
- } while (keep_working(pool));
- worker_set_flags(worker, WORKER_PREP);
- sleep:
-
- worker_enter_idle(worker);
- __set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&pool->lock);
- schedule();
- goto woke_up;
- }
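- /*
-  * rescuer_thread() services wq->maydays so WQ_MEM_RECLAIM workqueues keep
-  * making forward progress when new workers cannot be created: it attaches
-  * to the starving pool, runs the work items belonging to the mayday'd pwq
-  * and re-queues the mayday if the pool still needs more workers.
-  */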
- static int rescuer_thread(void *__rescuer)
- {
- struct worker *rescuer = __rescuer;
- struct workqueue_struct *wq = rescuer->rescue_wq;
- struct list_head *scheduled = &rescuer->scheduled;
- bool should_stop;
- set_user_nice(current, RESCUER_NICE_LEVEL);
-
- rescuer->task->flags |= PF_WQ_WORKER;
- repeat:
- set_current_state(TASK_INTERRUPTIBLE);
-
- should_stop = kthread_should_stop();
-
- spin_lock_irq(&wq_mayday_lock);
- while (!list_empty(&wq->maydays)) {
- struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
- struct pool_workqueue, mayday_node);
- struct worker_pool *pool = pwq->pool;
- struct work_struct *work, *n;
- bool first = true;
- __set_current_state(TASK_RUNNING);
- list_del_init(&pwq->mayday_node);
- spin_unlock_irq(&wq_mayday_lock);
- worker_attach_to_pool(rescuer, pool);
- spin_lock_irq(&pool->lock);
- rescuer->pool = pool;
-
- WARN_ON_ONCE(!list_empty(scheduled));
- list_for_each_entry_safe(work, n, &pool->worklist, entry) {
- if (get_work_pwq(work) == pwq) {
- if (first)
- pool->watchdog_ts = jiffies;
- move_linked_works(work, scheduled, &n);
- }
- first = false;
- }
- if (!list_empty(scheduled)) {
- process_scheduled_works(rescuer);
-
- if (need_to_create_worker(pool)) {
- spin_lock(&wq_mayday_lock);
- get_pwq(pwq);
- list_move_tail(&pwq->mayday_node, &wq->maydays);
- spin_unlock(&wq_mayday_lock);
- }
- }
-
- put_pwq(pwq);
-
- if (need_more_worker(pool))
- wake_up_worker(pool);
- rescuer->pool = NULL;
- spin_unlock_irq(&pool->lock);
- worker_detach_from_pool(rescuer, pool);
- spin_lock_irq(&wq_mayday_lock);
- }
- spin_unlock_irq(&wq_mayday_lock);
- if (should_stop) {
- __set_current_state(TASK_RUNNING);
- rescuer->task->flags &= ~PF_WQ_WORKER;
- return 0;
- }
-
- WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
- schedule();
- goto repeat;
- }
- static void check_flush_dependency(struct workqueue_struct *target_wq,
- struct work_struct *target_work)
- {
- work_func_t target_func = target_work ? target_work->func : NULL;
- struct worker *worker;
- if (target_wq->flags & WQ_MEM_RECLAIM)
- return;
- worker = current_wq_worker();
- WARN_ONCE(current->flags & PF_MEMALLOC,
- "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
- current->pid, current->comm, target_wq->name, target_func);
- WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
- (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
- "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
- worker->current_pwq->wq->name, worker->current_func,
- target_wq->name, target_func);
- }
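- /*
-  * wq_barrier: a barrier work item inserted right after a target work;
-  * completing the barrier signals that the target (and anything linked to
-  * it) has finished executing.
-  */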
- struct wq_barrier {
- struct work_struct work;
- struct completion done;
- struct task_struct *task;
- };
- static void wq_barrier_func(struct work_struct *work)
- {
- struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
- complete(&barr->done);
- }
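- /*
-  * insert_wq_barrier - insert a barrier work item
-  * @pwq: pwq to insert the barrier into
-  * @barr: wq_barrier to insert
-  * @target: target work to attach @barr to
-  * @worker: worker currently executing @target, NULL if none
-  *
-  * If @target is being executed, @barr goes at the head of the worker's
-  * ->scheduled list; otherwise it is linked right after @target on the
-  * worklist and @target gets WORK_STRUCT_LINKED so both are dequeued
-  * together.  Must be called with pool->lock held.
-  */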
- static void insert_wq_barrier(struct pool_workqueue *pwq,
- struct wq_barrier *barr,
- struct work_struct *target, struct worker *worker)
- {
- struct list_head *head;
- unsigned int linked = 0;
-
- INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
- __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
- init_completion(&barr->done);
- barr->task = current;
-
- if (worker)
- head = worker->scheduled.next;
- else {
- unsigned long *bits = work_data_bits(target);
- head = target->entry.next;
-
- linked = *bits & WORK_STRUCT_LINKED;
- __set_bit(WORK_STRUCT_LINKED_BIT, bits);
- }
- debug_work_activate(&barr->work);
- insert_work(pwq, &barr->work, head,
- work_color_to_flags(WORK_NO_COLOR) | linked);
- }
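- /*
-  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
-  * @wq: flush target
-  * @flush_color: new flush color, < 0 for no-op
-  * @work_color: new work color, < 0 for no-op
-  *
-  * Advances the flush and/or work color of every pwq.  nr_pwqs_to_flush
-  * counts pwqs that still have in-flight works of @flush_color; the
-  * extra initial reference is dropped at the end so the first flusher's
-  * completion fires once all counted pwqs have flushed.  Returns %true
-  * if there is something to wait for.
-  */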
- static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
- int flush_color, int work_color)
- {
- bool wait = false;
- struct pool_workqueue *pwq;
- if (flush_color >= 0) {
- WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
- atomic_set(&wq->nr_pwqs_to_flush, 1);
- }
- for_each_pwq(pwq, wq) {
- struct worker_pool *pool = pwq->pool;
- spin_lock_irq(&pool->lock);
- if (flush_color >= 0) {
- WARN_ON_ONCE(pwq->flush_color != -1);
- if (pwq->nr_in_flight[flush_color]) {
- pwq->flush_color = flush_color;
- atomic_inc(&wq->nr_pwqs_to_flush);
- wait = true;
- }
- }
- if (work_color >= 0) {
- WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
- pwq->work_color = work_color;
- }
- spin_unlock_irq(&pool->lock);
- }
- if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
- complete(&wq->first_flusher->done);
- return wait;
- }
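- /*
-  * flush_workqueue - ensure that any scheduled work has run to completion
-  * @wq: workqueue to flush
-  *
-  * Sleeps until all work items which were queued on entry have finished.
-  * Flushes are colored; the caller either becomes the first flusher and
-  * drives the color rollover below, queues up behind the in-progress
-  * flush, or lands on flusher_overflow when no free color is left.
-  */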
- void flush_workqueue(struct workqueue_struct *wq)
- {
- struct wq_flusher this_flusher = {
- .list = LIST_HEAD_INIT(this_flusher.list),
- .flush_color = -1,
- .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
- };
- int next_color;
- lock_map_acquire(&wq->lockdep_map);
- lock_map_release(&wq->lockdep_map);
- mutex_lock(&wq->mutex);
-
- next_color = work_next_color(wq->work_color);
- if (next_color != wq->flush_color) {
-
- WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
- this_flusher.flush_color = wq->work_color;
- wq->work_color = next_color;
- if (!wq->first_flusher) {
-
- WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
- wq->first_flusher = &this_flusher;
- if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
- wq->work_color)) {
-
- wq->flush_color = next_color;
- wq->first_flusher = NULL;
- goto out_unlock;
- }
- } else {
-
- WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
- list_add_tail(&this_flusher.list, &wq->flusher_queue);
- flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
- }
- } else {
-
- list_add_tail(&this_flusher.list, &wq->flusher_overflow);
- }
- check_flush_dependency(wq, NULL);
- mutex_unlock(&wq->mutex);
- wait_for_completion(&this_flusher.done);
-
- if (wq->first_flusher != &this_flusher)
- return;
- mutex_lock(&wq->mutex);
-
- if (wq->first_flusher != &this_flusher)
- goto out_unlock;
- wq->first_flusher = NULL;
- WARN_ON_ONCE(!list_empty(&this_flusher.list));
- WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
- while (true) {
- struct wq_flusher *next, *tmp;
-
- list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
- if (next->flush_color != wq->flush_color)
- break;
- list_del_init(&next->list);
- complete(&next->done);
- }
- WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
- wq->flush_color != work_next_color(wq->work_color));
-
- wq->flush_color = work_next_color(wq->flush_color);
-
- if (!list_empty(&wq->flusher_overflow)) {
-
- list_for_each_entry(tmp, &wq->flusher_overflow, list)
- tmp->flush_color = wq->work_color;
- wq->work_color = work_next_color(wq->work_color);
- list_splice_tail_init(&wq->flusher_overflow,
- &wq->flusher_queue);
- flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
- }
- if (list_empty(&wq->flusher_queue)) {
- WARN_ON_ONCE(wq->flush_color != wq->work_color);
- break;
- }
-
- WARN_ON_ONCE(wq->flush_color == wq->work_color);
- WARN_ON_ONCE(wq->flush_color != next->flush_color);
- list_del_init(&next->list);
- wq->first_flusher = next;
- if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
- break;
-
- wq->first_flusher = NULL;
- }
- out_unlock:
- mutex_unlock(&wq->mutex);
- }
- EXPORT_SYMBOL(flush_workqueue);
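- /*
-  * drain_workqueue - drain a workqueue
-  * @wq: workqueue to drain
-  *
-  * Waits until the workqueue becomes empty.  While draining is in
-  * progress (__WQ_DRAINING set), only chained work items (queued from
-  * work already running on @wq) may be queued.  Keeps reflushing until
-  * every pwq has no active and no delayed works, warning if it takes
-  * suspiciously many attempts.
-  */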
- void drain_workqueue(struct workqueue_struct *wq)
- {
- unsigned int flush_cnt = 0;
- struct pool_workqueue *pwq;
-
- mutex_lock(&wq->mutex);
- if (!wq->nr_drainers++)
- wq->flags |= __WQ_DRAINING;
- mutex_unlock(&wq->mutex);
- reflush:
- flush_workqueue(wq);
- mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq) {
- bool drained;
- spin_lock_irq(&pwq->pool->lock);
- drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
- spin_unlock_irq(&pwq->pool->lock);
- if (drained)
- continue;
- if (++flush_cnt == 10 ||
- (flush_cnt % 100 == 0 && flush_cnt <= 1000))
- pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
- wq->name, flush_cnt);
- mutex_unlock(&wq->mutex);
- goto reflush;
- }
- if (!--wq->nr_drainers)
- wq->flags &= ~__WQ_DRAINING;
- mutex_unlock(&wq->mutex);
- }
- EXPORT_SYMBOL_GPL(drain_workqueue);
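- /*
-  * start_flush_work - set up @barr to wait for @work
-  *
-  * Locates @work: either still queued on a pwq, or being executed by a
-  * worker found via find_worker_executing_work().  Returns %false if it
-  * is neither, i.e. there is nothing to flush.  The lockdep acquire is
-  * done for write when max_active is 1 or a rescuer exists, so flushing
-  * another work item on the same workqueue is reported as a potential
-  * deadlock.
-  */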
- static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
- {
- struct worker *worker = NULL;
- struct worker_pool *pool;
- struct pool_workqueue *pwq;
- might_sleep();
- local_irq_disable();
- pool = get_work_pool(work);
- if (!pool) {
- local_irq_enable();
- return false;
- }
- spin_lock(&pool->lock);
-
- pwq = get_work_pwq(work);
- if (pwq) {
- if (unlikely(pwq->pool != pool))
- goto already_gone;
- } else {
- worker = find_worker_executing_work(pool, work);
- if (!worker)
- goto already_gone;
- pwq = worker->current_pwq;
- }
- check_flush_dependency(pwq->wq, work);
- insert_wq_barrier(pwq, barr, work, worker);
- spin_unlock_irq(&pool->lock);
-
- if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
- lock_map_acquire(&pwq->wq->lockdep_map);
- else
- lock_map_acquire_read(&pwq->wq->lockdep_map);
- lock_map_release(&pwq->wq->lockdep_map);
- return true;
- already_gone:
- spin_unlock_irq(&pool->lock);
- return false;
- }
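- /*
-  * flush_work - wait for a work to finish executing the last queueing instance
-  * @work: the work to flush
-  *
-  * Return: %true if flush_work() waited for the work to finish execution,
-  * %false if it was already idle.
-  */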
- bool flush_work(struct work_struct *work)
- {
- struct wq_barrier barr;
- lock_map_acquire(&work->lockdep_map);
- lock_map_release(&work->lockdep_map);
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
- }
- EXPORT_SYMBOL_GPL(flush_work);
- struct cwt_wait {
- wait_queue_t wait;
- struct work_struct *work;
- };
- static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
- {
- struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
- if (cwait->work != key)
- return 0;
- return autoremove_wake_function(wait, mode, sync, key);
- }
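- /*
-  * __cancel_work_timer - cancel @work and wait for any in-flight execution
-  *
-  * Loops on try_to_grab_pending(); -ENOENT means another task is already
-  * canceling this work, so wait on cancel_waitq (cwt_wakefn filters the
-  * wakeup by @work) instead of spinning.  Once the pending bit is owned,
-  * mark the work canceling, flush it, clear its data and wake any other
-  * canceler that may be waiting on us.
-  */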
- static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
- {
- static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
- unsigned long flags;
- int ret;
- do {
- ret = try_to_grab_pending(work, is_dwork, &flags);
-
- if (unlikely(ret == -ENOENT)) {
- struct cwt_wait cwait;
- init_wait(&cwait.wait);
- cwait.wait.func = cwt_wakefn;
- cwait.work = work;
- prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
- TASK_UNINTERRUPTIBLE);
- if (work_is_canceling(work))
- schedule();
- finish_wait(&cancel_waitq, &cwait.wait);
- }
- } while (unlikely(ret < 0));
-
- mark_work_canceling(work);
- local_irq_restore(flags);
- flush_work(work);
- clear_work_data(work);
-
- smp_mb();
- if (waitqueue_active(&cancel_waitq))
- __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
- return ret;
- }
- bool cancel_work_sync(struct work_struct *work)
- {
- return __cancel_work_timer(work, false);
- }
- EXPORT_SYMBOL_GPL(cancel_work_sync);
- bool flush_delayed_work(struct delayed_work *dwork)
- {
- local_irq_disable();
- if (del_timer_sync(&dwork->timer))
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
- local_irq_enable();
- return flush_work(&dwork->work);
- }
- EXPORT_SYMBOL(flush_delayed_work);
- static bool __cancel_work(struct work_struct *work, bool is_dwork)
- {
- unsigned long flags;
- int ret;
- do {
- ret = try_to_grab_pending(work, is_dwork, &flags);
- } while (unlikely(ret == -EAGAIN));
- if (unlikely(ret < 0))
- return false;
- set_work_pool_and_clear_pending(work, get_work_pool_id(work));
- local_irq_restore(flags);
- return ret;
- }
- bool cancel_work(struct work_struct *work)
- {
- return __cancel_work(work, false);
- }
- bool cancel_delayed_work(struct delayed_work *dwork)
- {
- return __cancel_work(&dwork->work, true);
- }
- EXPORT_SYMBOL(cancel_delayed_work);
- bool cancel_delayed_work_sync(struct delayed_work *dwork)
- {
- return __cancel_work_timer(&dwork->work, true);
- }
- EXPORT_SYMBOL(cancel_delayed_work_sync);
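- /*
-  * Illustrative use of the cancel/flush API from driver code (hypothetical
-  * example, not part of this file):
-  *
-  *	static struct delayed_work poll_work;	/* hypothetical */
-  *
-  *	INIT_DELAYED_WORK(&poll_work, poll_fn);
-  *	queue_delayed_work(system_wq, &poll_work, HZ);
-  *	...
-  *	cancel_delayed_work_sync(&poll_work);	/* teardown, may sleep */
-  */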
- int schedule_on_each_cpu(work_func_t func)
- {
- int cpu;
- struct work_struct __percpu *works;
- works = alloc_percpu(struct work_struct);
- if (!works)
- return -ENOMEM;
- get_online_cpus();
- for_each_online_cpu(cpu) {
- struct work_struct *work = per_cpu_ptr(works, cpu);
- INIT_WORK(work, func);
- schedule_work_on(cpu, work);
- }
- for_each_online_cpu(cpu)
- flush_work(per_cpu_ptr(works, cpu));
- put_online_cpus();
- free_percpu(works);
- return 0;
- }
- int execute_in_process_context(work_func_t fn, struct execute_work *ew)
- {
- if (!in_interrupt()) {
- fn(&ew->work);
- return 0;
- }
- INIT_WORK(&ew->work, fn);
- schedule_work(&ew->work);
- return 1;
- }
- EXPORT_SYMBOL_GPL(execute_in_process_context);
- void free_workqueue_attrs(struct workqueue_attrs *attrs)
- {
- if (attrs) {
- free_cpumask_var(attrs->cpumask);
- kfree(attrs);
- }
- }
- struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
- {
- struct workqueue_attrs *attrs;
- attrs = kzalloc(sizeof(*attrs), gfp_mask);
- if (!attrs)
- goto fail;
- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
- goto fail;
- cpumask_copy(attrs->cpumask, cpu_possible_mask);
- return attrs;
- fail:
- free_workqueue_attrs(attrs);
- return NULL;
- }
- static void copy_workqueue_attrs(struct workqueue_attrs *to,
- const struct workqueue_attrs *from)
- {
- to->nice = from->nice;
- cpumask_copy(to->cpumask, from->cpumask);
-
- to->no_numa = from->no_numa;
- }
- static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
- {
- u32 hash = 0;
- hash = jhash_1word(attrs->nice, hash);
- hash = jhash(cpumask_bits(attrs->cpumask),
- BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
- return hash;
- }
- static bool wqattrs_equal(const struct workqueue_attrs *a,
- const struct workqueue_attrs *b)
- {
- if (a->nice != b->nice)
- return false;
- if (!cpumask_equal(a->cpumask, b->cpumask))
- return false;
- return true;
- }
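- /*
-  * init_worker_pool - initialize a newly zalloc'd worker_pool
-  * @pool: worker_pool to initialize
-  *
-  * Sets up locks, lists, timers, the busy-worker hash and the pool's
-  * attrs.  The pool starts out POOL_DISASSOCIATED and unbound from any
-  * CPU or node.  Returns 0 on success, -ENOMEM on allocation failure.
-  */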
- static int init_worker_pool(struct worker_pool *pool)
- {
- spin_lock_init(&pool->lock);
- pool->id = -1;
- pool->cpu = -1;
- pool->node = NUMA_NO_NODE;
- pool->flags |= POOL_DISASSOCIATED;
- pool->watchdog_ts = jiffies;
- INIT_LIST_HEAD(&pool->worklist);
- INIT_LIST_HEAD(&pool->idle_list);
- hash_init(pool->busy_hash);
- init_timer_deferrable(&pool->idle_timer);
- pool->idle_timer.function = idle_worker_timeout;
- pool->idle_timer.data = (unsigned long)pool;
- setup_timer(&pool->mayday_timer, pool_mayday_timeout,
- (unsigned long)pool);
- mutex_init(&pool->manager_arb);
- mutex_init(&pool->attach_mutex);
- INIT_LIST_HEAD(&pool->workers);
- ida_init(&pool->worker_ida);
- INIT_HLIST_NODE(&pool->hash_node);
- pool->refcnt = 1;
-
- pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
- if (!pool->attrs)
- return -ENOMEM;
- return 0;
- }
- static void rcu_free_wq(struct rcu_head *rcu)
- {
- struct workqueue_struct *wq =
- container_of(rcu, struct workqueue_struct, rcu);
- if (!(wq->flags & WQ_UNBOUND))
- free_percpu(wq->cpu_pwqs);
- else
- free_workqueue_attrs(wq->unbound_attrs);
- kfree(wq->rescuer);
- kfree(wq);
- }
- static void rcu_free_pool(struct rcu_head *rcu)
- {
- struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
- ida_destroy(&pool->worker_ida);
- free_workqueue_attrs(pool->attrs);
- kfree(pool);
- }
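- /*
-  * put_unbound_pool - put a worker_pool reference
-  * @pool: worker_pool to put
-  *
-  * On the last put, unhashes the pool, destroys all its workers (waiting
-  * for stragglers to detach), kills the timers and frees the pool via
-  * sched-RCU.  Caller must hold wq_pool_mutex.
-  */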
- static void put_unbound_pool(struct worker_pool *pool)
- {
- DECLARE_COMPLETION_ONSTACK(detach_completion);
- struct worker *worker;
- lockdep_assert_held(&wq_pool_mutex);
- if (--pool->refcnt)
- return;
-
- if (WARN_ON(!(pool->cpu < 0)) ||
- WARN_ON(!list_empty(&pool->worklist)))
- return;
-
- if (pool->id >= 0)
- idr_remove(&worker_pool_idr, pool->id);
- hash_del(&pool->hash_node);
-
- mutex_lock(&pool->manager_arb);
- spin_lock_irq(&pool->lock);
- while ((worker = first_idle_worker(pool)))
- destroy_worker(worker);
- WARN_ON(pool->nr_workers || pool->nr_idle);
- spin_unlock_irq(&pool->lock);
- mutex_lock(&pool->attach_mutex);
- if (!list_empty(&pool->workers))
- pool->detach_completion = &detach_completion;
- mutex_unlock(&pool->attach_mutex);
- if (pool->detach_completion)
- wait_for_completion(pool->detach_completion);
- mutex_unlock(&pool->manager_arb);
-
- del_timer_sync(&pool->idle_timer);
- del_timer_sync(&pool->mayday_timer);
-
- call_rcu_sched(&pool->rcu, rcu_free_pool);
- }
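- /*
-  * get_unbound_pool - get a worker_pool with the specified attributes
-  * @attrs: attributes of the requested pool
-  *
-  * Unbound pools are shared: an existing pool with equal attrs gets its
-  * refcount bumped; otherwise a new pool is allocated on the NUMA node
-  * whose CPUs cover @attrs->cpumask, given an id and an initial worker.
-  * Returns NULL on failure.  Caller must hold wq_pool_mutex.
-  */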
- static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
- {
- u32 hash = wqattrs_hash(attrs);
- struct worker_pool *pool;
- int node;
- int target_node = NUMA_NO_NODE;
- lockdep_assert_held(&wq_pool_mutex);
-
- hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
- if (wqattrs_equal(pool->attrs, attrs)) {
- pool->refcnt++;
- return pool;
- }
- }
-
- if (wq_numa_enabled) {
- for_each_node(node) {
- if (cpumask_subset(attrs->cpumask,
- wq_numa_possible_cpumask[node])) {
- target_node = node;
- break;
- }
- }
- }
-
- pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
- if (!pool || init_worker_pool(pool) < 0)
- goto fail;
- lockdep_set_subclass(&pool->lock, 1);
- copy_workqueue_attrs(pool->attrs, attrs);
- pool->node = target_node;
-
- pool->attrs->no_numa = false;
- if (worker_pool_assign_id(pool) < 0)
- goto fail;
-
- if (!create_worker(pool))
- goto fail;
-
- hash_add(unbound_pool_hash, &pool->hash_node, hash);
- return pool;
- fail:
- if (pool)
- put_unbound_pool(pool);
- return NULL;
- }
- static void rcu_free_pwq(struct rcu_head *rcu)
- {
- kmem_cache_free(pwq_cache,
- container_of(rcu, struct pool_workqueue, rcu));
- }
- static void pwq_unbound_release_workfn(struct work_struct *work)
- {
- struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
- unbound_release_work);
- struct workqueue_struct *wq = pwq->wq;
- struct worker_pool *pool = pwq->pool;
- bool is_last;
- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
- return;
- mutex_lock(&wq->mutex);
- list_del_rcu(&pwq->pwqs_node);
- is_last = list_empty(&wq->pwqs);
- mutex_unlock(&wq->mutex);
- mutex_lock(&wq_pool_mutex);
- put_unbound_pool(pool);
- mutex_unlock(&wq_pool_mutex);
- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
-
- if (is_last)
- call_rcu_sched(&wq->rcu, rcu_free_wq);
- }
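- /*
-  * pwq_adjust_max_active - bring a pwq's max_active in line with its wq
-  * @pwq: target pool_workqueue
-  *
-  * Freezable pwqs get max_active of zero while the system is freezing so
-  * nothing new starts executing; otherwise saved_max_active is applied
-  * and any delayed works that now fit are activated.
-  */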
- static void pwq_adjust_max_active(struct pool_workqueue *pwq)
- {
- struct workqueue_struct *wq = pwq->wq;
- bool freezable = wq->flags & WQ_FREEZABLE;
-
- lockdep_assert_held(&wq->mutex);
-
- if (!freezable && pwq->max_active == wq->saved_max_active)
- return;
- spin_lock_irq(&pwq->pool->lock);
-
- if (!freezable || !workqueue_freezing) {
- pwq->max_active = wq->saved_max_active;
- while (!list_empty(&pwq->delayed_works) &&
- pwq->nr_active < pwq->max_active)
- pwq_activate_first_delayed(pwq);
-
- wake_up_worker(pwq->pool);
- } else {
- pwq->max_active = 0;
- }
- spin_unlock_irq(&pwq->pool->lock);
- }
- static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
- struct worker_pool *pool)
- {
- BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
- memset(pwq, 0, sizeof(*pwq));
- pwq->pool = pool;
- pwq->wq = wq;
- pwq->flush_color = -1;
- pwq->refcnt = 1;
- INIT_LIST_HEAD(&pwq->delayed_works);
- INIT_LIST_HEAD(&pwq->pwqs_node);
- INIT_LIST_HEAD(&pwq->mayday_node);
- INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
- }
- static void link_pwq(struct pool_workqueue *pwq)
- {
- struct workqueue_struct *wq = pwq->wq;
- lockdep_assert_held(&wq->mutex);
-
- if (!list_empty(&pwq->pwqs_node))
- return;
-
- pwq->work_color = wq->work_color;
-
- pwq_adjust_max_active(pwq);
-
- list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
- }
- static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
- const struct workqueue_attrs *attrs)
- {
- struct worker_pool *pool;
- struct pool_workqueue *pwq;
- lockdep_assert_held(&wq_pool_mutex);
- pool = get_unbound_pool(attrs);
- if (!pool)
- return NULL;
- pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
- if (!pwq) {
- put_unbound_pool(pool);
- return NULL;
- }
- init_pwq(pwq, wq, pool);
- return pwq;
- }
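- /*
-  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for a NUMA node
-  * @attrs: the wq_attrs of the default pwq of the target workqueue
-  * @node: the target NUMA node
-  * @cpu_going_down: a CPU going down, -1 if none
-  * @cpumask: outarg, the resulting cpumask
-  *
-  * Returns %true if @node should get its own pwq, i.e. the part of
-  * @attrs->cpumask that belongs to @node differs from the whole mask;
-  * %false (with @cpumask set to @attrs->cpumask) if the default pwq
-  * should be used for this node.
-  */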
- static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
- int cpu_going_down, cpumask_t *cpumask)
- {
- if (!wq_numa_enabled || attrs->no_numa)
- goto use_dfl;
-
- cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
- if (cpu_going_down >= 0)
- cpumask_clear_cpu(cpu_going_down, cpumask);
- if (cpumask_empty(cpumask))
- goto use_dfl;
-
- cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
- return !cpumask_equal(cpumask, attrs->cpumask);
- use_dfl:
- cpumask_copy(cpumask, attrs->cpumask);
- return false;
- }
- static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
- int node,
- struct pool_workqueue *pwq)
- {
- struct pool_workqueue *old_pwq;
- lockdep_assert_held(&wq_pool_mutex);
- lockdep_assert_held(&wq->mutex);
-
- link_pwq(pwq);
- old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
- rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
- return old_pwq;
- }
- struct apply_wqattrs_ctx {
- struct workqueue_struct *wq;
- struct workqueue_attrs *attrs;
- struct list_head list;
- struct pool_workqueue *dfl_pwq;
- struct pool_workqueue *pwq_tbl[];
- };
- static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
- {
- if (ctx) {
- int node;
- for_each_node(node)
- put_pwq_unlocked(ctx->pwq_tbl[node]);
- put_pwq_unlocked(ctx->dfl_pwq);
- free_workqueue_attrs(ctx->attrs);
- kfree(ctx);
- }
- }
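- /*
-  * apply_wqattrs_prepare - allocate everything needed to apply new attrs
-  *
-  * Builds an apply_wqattrs_ctx containing the effective attrs (masked by
-  * wq_unbound_cpumask), a default pwq, and a per-node pwq table where
-  * nodes that don't need their own pwq share the default one.  All
-  * allocations happen here so apply_wqattrs_commit() cannot fail.
-  */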
- static struct apply_wqattrs_ctx *
- apply_wqattrs_prepare(struct workqueue_struct *wq,
- const struct workqueue_attrs *attrs)
- {
- struct apply_wqattrs_ctx *ctx;
- struct workqueue_attrs *new_attrs, *tmp_attrs;
- int node;
- lockdep_assert_held(&wq_pool_mutex);
- ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
- GFP_KERNEL);
- new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
- tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
- if (!ctx || !new_attrs || !tmp_attrs)
- goto out_free;
-
- copy_workqueue_attrs(new_attrs, attrs);
- cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
- if (unlikely(cpumask_empty(new_attrs->cpumask)))
- cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
-
- copy_workqueue_attrs(tmp_attrs, new_attrs);
-
- ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
- if (!ctx->dfl_pwq)
- goto out_free;
- for_each_node(node) {
- if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
- ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
- if (!ctx->pwq_tbl[node])
- goto out_free;
- } else {
- ctx->dfl_pwq->refcnt++;
- ctx->pwq_tbl[node] = ctx->dfl_pwq;
- }
- }
-
- copy_workqueue_attrs(new_attrs, attrs);
- cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
- ctx->attrs = new_attrs;
- ctx->wq = wq;
- free_workqueue_attrs(tmp_attrs);
- return ctx;
- out_free:
- free_workqueue_attrs(tmp_attrs);
- free_workqueue_attrs(new_attrs);
- apply_wqattrs_cleanup(ctx);
- return NULL;
- }
- static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
- {
- int node;
-
- mutex_lock(&ctx->wq->mutex);
- copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
-
- for_each_node(node)
- ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
- ctx->pwq_tbl[node]);
-
- link_pwq(ctx->dfl_pwq);
- swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
- mutex_unlock(&ctx->wq->mutex);
- }
- static void apply_wqattrs_lock(void)
- {
-
- get_online_cpus();
- mutex_lock(&wq_pool_mutex);
- }
- static void apply_wqattrs_unlock(void)
- {
- mutex_unlock(&wq_pool_mutex);
- put_online_cpus();
- }
- static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
- const struct workqueue_attrs *attrs)
- {
- struct apply_wqattrs_ctx *ctx;
-
- if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
- return -EINVAL;
-
- if (!list_empty(&wq->pwqs)) {
- if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
- return -EINVAL;
- wq->flags &= ~__WQ_ORDERED;
- }
- ctx = apply_wqattrs_prepare(wq, attrs);
- if (!ctx)
- return -ENOMEM;
-
- apply_wqattrs_commit(ctx);
- apply_wqattrs_cleanup(ctx);
- return 0;
- }
- int apply_workqueue_attrs(struct workqueue_struct *wq,
- const struct workqueue_attrs *attrs)
- {
- int ret;
- apply_wqattrs_lock();
- ret = apply_workqueue_attrs_locked(wq, attrs);
- apply_wqattrs_unlock();
- return ret;
- }
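- /*
-  * wq_update_unbound_numa - update NUMA affinity of an unbound wq for CPU hotplug
-  * @wq: the target workqueue
-  * @cpu: the CPU which is coming up or going down
-  * @online: whether @cpu is coming up or going down
-  *
-  * Recomputes @cpu's node cpumask and installs a matching node pwq,
-  * keeps the current one if it already matches, or falls back to the
-  * default pwq.  The preallocated wq_update_unbound_numa_attrs_buf is
-  * used so no attrs allocation is needed on each hotplug event.
-  */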
- static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
- bool online)
- {
- int node = cpu_to_node(cpu);
- int cpu_off = online ? -1 : cpu;
- struct pool_workqueue *old_pwq = NULL, *pwq;
- struct workqueue_attrs *target_attrs;
- cpumask_t *cpumask;
- lockdep_assert_held(&wq_pool_mutex);
- if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
- wq->unbound_attrs->no_numa)
- return;
-
- target_attrs = wq_update_unbound_numa_attrs_buf;
- cpumask = target_attrs->cpumask;
- copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
- pwq = unbound_pwq_by_node(wq, node);
-
- if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
- if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
- return;
- } else {
- goto use_dfl_pwq;
- }
-
- pwq = alloc_unbound_pwq(wq, target_attrs);
- if (!pwq) {
- pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
- wq->name);
- goto use_dfl_pwq;
- }
-
- mutex_lock(&wq->mutex);
- old_pwq = numa_pwq_tbl_install(wq, node, pwq);
- goto out_unlock;
- use_dfl_pwq:
- mutex_lock(&wq->mutex);
- spin_lock_irq(&wq->dfl_pwq->pool->lock);
- get_pwq(wq->dfl_pwq);
- spin_unlock_irq(&wq->dfl_pwq->pool->lock);
- old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
- out_unlock:
- mutex_unlock(&wq->mutex);
- put_pwq_unlocked(old_pwq);
- }
- static int alloc_and_link_pwqs(struct workqueue_struct *wq)
- {
- bool highpri = wq->flags & WQ_HIGHPRI;
- int cpu, ret;
- if (!(wq->flags & WQ_UNBOUND)) {
- wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
- if (!wq->cpu_pwqs)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- struct pool_workqueue *pwq =
- per_cpu_ptr(wq->cpu_pwqs, cpu);
- struct worker_pool *cpu_pools =
- per_cpu(cpu_worker_pools, cpu);
- init_pwq(pwq, wq, &cpu_pools[highpri]);
- mutex_lock(&wq->mutex);
- link_pwq(pwq);
- mutex_unlock(&wq->mutex);
- }
- return 0;
- } else if (wq->flags & __WQ_ORDERED) {
- ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
-
- WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
- wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
- "ordering guarantee broken for workqueue %s\n", wq->name);
- return ret;
- } else {
- return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
- }
- }
- static int wq_clamp_max_active(int max_active, unsigned int flags,
- const char *name)
- {
- int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
- if (max_active < 1 || max_active > lim)
- pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
- max_active, name, 1, lim);
- return clamp_val(max_active, 1, lim);
- }
- struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
- unsigned int flags,
- int max_active,
- struct lock_class_key *key,
- const char *lock_name, ...)
- {
- size_t tbl_size = 0;
- va_list args;
- struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
-
- if ((flags & WQ_UNBOUND) && max_active == 1)
- flags |= __WQ_ORDERED;
-
- if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
- flags |= WQ_UNBOUND;
-
- if (flags & WQ_UNBOUND)
- tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
- wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
- if (!wq)
- return NULL;
- if (flags & WQ_UNBOUND) {
- wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
- if (!wq->unbound_attrs)
- goto err_free_wq;
- }
- va_start(args, lock_name);
- vsnprintf(wq->name, sizeof(wq->name), fmt, args);
- va_end(args);
- max_active = max_active ?: WQ_DFL_ACTIVE;
- max_active = wq_clamp_max_active(max_active, flags, wq->name);
-
- wq->flags = flags;
- wq->saved_max_active = max_active;
- mutex_init(&wq->mutex);
- atomic_set(&wq->nr_pwqs_to_flush, 0);
- INIT_LIST_HEAD(&wq->pwqs);
- INIT_LIST_HEAD(&wq->flusher_queue);
- INIT_LIST_HEAD(&wq->flusher_overflow);
- INIT_LIST_HEAD(&wq->maydays);
- lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
- INIT_LIST_HEAD(&wq->list);
- if (alloc_and_link_pwqs(wq) < 0)
- goto err_free_wq;
-
- if (flags & WQ_MEM_RECLAIM) {
- struct worker *rescuer;
- rescuer = alloc_worker(NUMA_NO_NODE);
- if (!rescuer)
- goto err_destroy;
- rescuer->rescue_wq = wq;
- rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
- wq->name);
- if (IS_ERR(rescuer->task)) {
- kfree(rescuer);
- goto err_destroy;
- }
- wq->rescuer = rescuer;
- kthread_bind_mask(rescuer->task, cpu_possible_mask);
- wake_up_process(rescuer->task);
- }
- if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
- goto err_destroy;
-
- mutex_lock(&wq_pool_mutex);
- mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
- mutex_unlock(&wq->mutex);
- list_add_tail_rcu(&wq->list, &workqueues);
- mutex_unlock(&wq_pool_mutex);
- return wq;
- err_free_wq:
- free_workqueue_attrs(wq->unbound_attrs);
- kfree(wq);
- return NULL;
- err_destroy:
- destroy_workqueue(wq);
- return NULL;
- }
- EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
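- /*
-  * Illustrative allocation/teardown sequence (hypothetical driver code,
-  * not part of this file):
-  *
-  *	struct workqueue_struct *mywq;	/* hypothetical */
-  *
-  *	mywq = alloc_workqueue("mywq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
-  *	if (!mywq)
-  *		return -ENOMEM;
-  *	queue_work(mywq, &some_work);
-  *	...
-  *	destroy_workqueue(mywq);
-  */
- /*
-  * destroy_workqueue - safely terminate a workqueue
-  * @wq: target workqueue
-  *
-  * Drains @wq, sanity-checks that nothing is left in flight, removes it
-  * from the global list, stops the rescuer and drops the pwq references.
-  * Per-cpu workqueues are freed directly via sched-RCU; unbound ones are
-  * freed from the last pwq's release path.
-  */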
- void destroy_workqueue(struct workqueue_struct *wq)
- {
- struct pool_workqueue *pwq;
- int node;
-
- drain_workqueue(wq);
-
- mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq) {
- int i;
- for (i = 0; i < WORK_NR_COLORS; i++) {
- if (WARN_ON(pwq->nr_in_flight[i])) {
- mutex_unlock(&wq->mutex);
- return;
- }
- }
- if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
- WARN_ON(pwq->nr_active) ||
- WARN_ON(!list_empty(&pwq->delayed_works))) {
- mutex_unlock(&wq->mutex);
- return;
- }
- }
- mutex_unlock(&wq->mutex);
-
- mutex_lock(&wq_pool_mutex);
- list_del_rcu(&wq->list);
- mutex_unlock(&wq_pool_mutex);
- workqueue_sysfs_unregister(wq);
- if (wq->rescuer)
- kthread_stop(wq->rescuer->task);
- if (!(wq->flags & WQ_UNBOUND)) {
-
- call_rcu_sched(&wq->rcu, rcu_free_wq);
- } else {
-
- for_each_node(node) {
- pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
- RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
- put_pwq_unlocked(pwq);
- }
-
- pwq = wq->dfl_pwq;
- wq->dfl_pwq = NULL;
- put_pwq_unlocked(pwq);
- }
- }
- EXPORT_SYMBOL_GPL(destroy_workqueue);
- void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
- {
- struct pool_workqueue *pwq;
-
- if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
- return;
- max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
- mutex_lock(&wq->mutex);
- wq->flags &= ~__WQ_ORDERED;
- wq->saved_max_active = max_active;
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
- mutex_unlock(&wq->mutex);
- }
- EXPORT_SYMBOL_GPL(workqueue_set_max_active);
- bool current_is_workqueue_rescuer(void)
- {
- struct worker *worker = current_wq_worker();
- return worker && worker->rescue_wq;
- }
- bool workqueue_congested(int cpu, struct workqueue_struct *wq)
- {
- struct pool_workqueue *pwq;
- bool ret;
- rcu_read_lock_sched();
- if (cpu == WORK_CPU_UNBOUND)
- cpu = smp_processor_id();
- if (!(wq->flags & WQ_UNBOUND))
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
- else
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
- ret = !list_empty(&pwq->delayed_works);
- rcu_read_unlock_sched();
- return ret;
- }
- EXPORT_SYMBOL_GPL(workqueue_congested);
- unsigned int work_busy(struct work_struct *work)
- {
- struct worker_pool *pool;
- unsigned long flags;
- unsigned int ret = 0;
- if (work_pending(work))
- ret |= WORK_BUSY_PENDING;
- local_irq_save(flags);
- pool = get_work_pool(work);
- if (pool) {
- spin_lock(&pool->lock);
- if (find_worker_executing_work(pool, work))
- ret |= WORK_BUSY_RUNNING;
- spin_unlock(&pool->lock);
- }
- local_irq_restore(flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(work_busy);
- void set_worker_desc(const char *fmt, ...)
- {
- struct worker *worker = current_wq_worker();
- va_list args;
- if (worker) {
- va_start(args, fmt);
- vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
- va_end(args);
- worker->desc_valid = true;
- }
- }
- void print_worker_info(const char *log_lvl, struct task_struct *task)
- {
- work_func_t *fn = NULL;
- char name[WQ_NAME_LEN] = { };
- char desc[WORKER_DESC_LEN] = { };
- struct pool_workqueue *pwq = NULL;
- struct workqueue_struct *wq = NULL;
- bool desc_valid = false;
- struct worker *worker;
- if (!(task->flags & PF_WQ_WORKER))
- return;
-
- worker = kthread_probe_data(task);
-
- probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
- probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
- probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
- probe_kernel_read(name, wq->name, sizeof(name) - 1);
-
- probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
- if (desc_valid)
- probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
- if (fn || name[0] || desc[0]) {
- printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
- if (desc[0])
- pr_cont(" (%s)", desc);
- pr_cont("\n");
- }
- }
- static void pr_cont_pool_info(struct worker_pool *pool)
- {
- pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
- if (pool->node != NUMA_NO_NODE)
- pr_cont(" node=%d", pool->node);
- pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
- }
- static void pr_cont_work(bool comma, struct work_struct *work)
- {
- if (work->func == wq_barrier_func) {
- struct wq_barrier *barr;
- barr = container_of(work, struct wq_barrier, work);
- pr_cont("%s BAR(%d)", comma ? "," : "",
- task_pid_nr(barr->task));
- } else {
- pr_cont("%s %pf", comma ? "," : "", work->func);
- }
- }
- static void show_pwq(struct pool_workqueue *pwq)
- {
- struct worker_pool *pool = pwq->pool;
- struct work_struct *work;
- struct worker *worker;
- bool has_in_flight = false, has_pending = false;
- int bkt;
- pr_info(" pwq %d:", pool->id);
- pr_cont_pool_info(pool);
- pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
- !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
- hash_for_each(pool->busy_hash, bkt, worker, hentry) {
- if (worker->current_pwq == pwq) {
- has_in_flight = true;
- break;
- }
- }
- if (has_in_flight) {
- bool comma = false;
- pr_info(" in-flight:");
- hash_for_each(pool->busy_hash, bkt, worker, hentry) {
- if (worker->current_pwq != pwq)
- continue;
- pr_cont("%s %d%s:%pf", comma ? "," : "",
- task_pid_nr(worker->task),
- worker == pwq->wq->rescuer ? "(RESCUER)" : "",
- worker->current_func);
- list_for_each_entry(work, &worker->scheduled, entry)
- pr_cont_work(false, work);
- comma = true;
- }
- pr_cont("\n");
- }
- list_for_each_entry(work, &pool->worklist, entry) {
- if (get_work_pwq(work) == pwq) {
- has_pending = true;
- break;
- }
- }
- if (has_pending) {
- bool comma = false;
- pr_info(" pending:");
- list_for_each_entry(work, &pool->worklist, entry) {
- if (get_work_pwq(work) != pwq)
- continue;
- pr_cont_work(comma, work);
- comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
- }
- pr_cont("\n");
- }
- if (!list_empty(&pwq->delayed_works)) {
- bool comma = false;
- pr_info(" delayed:");
- list_for_each_entry(work, &pwq->delayed_works, entry) {
- pr_cont_work(comma, work);
- comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
- }
- pr_cont("\n");
- }
- }
- void show_workqueue_state(void)
- {
- struct workqueue_struct *wq;
- struct worker_pool *pool;
- unsigned long flags;
- int pi;
- rcu_read_lock_sched();
- pr_info("Showing busy workqueues and worker pools:\n");
- list_for_each_entry_rcu(wq, &workqueues, list) {
- struct pool_workqueue *pwq;
- bool idle = true;
- for_each_pwq(pwq, wq) {
- if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
- idle = false;
- break;
- }
- }
- if (idle)
- continue;
- pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
- for_each_pwq(pwq, wq) {
- spin_lock_irqsave(&pwq->pool->lock, flags);
- if (pwq->nr_active || !list_empty(&pwq->delayed_works))
- show_pwq(pwq);
- spin_unlock_irqrestore(&pwq->pool->lock, flags);
- }
- }
- for_each_pool(pool, pi) {
- struct worker *worker;
- bool first = true;
- spin_lock_irqsave(&pool->lock, flags);
- if (pool->nr_workers == pool->nr_idle)
- goto next_pool;
- pr_info("pool %d:", pool->id);
- pr_cont_pool_info(pool);
- pr_cont(" hung=%us workers=%d",
- jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
- pool->nr_workers);
- if (pool->manager)
- pr_cont(" manager: %d",
- task_pid_nr(pool->manager->task));
- list_for_each_entry(worker, &pool->idle_list, entry) {
- pr_cont(" %s%d", first ? "idle: " : "",
- task_pid_nr(worker->task));
- first = false;
- }
- pr_cont("\n");
- next_pool:
- spin_unlock_irqrestore(&pool->lock, flags);
- }
- rcu_read_unlock_sched();
- }
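- /*
-  * CPU hotplug support.  wq_unbind_fn() is queued on the outgoing CPU via
-  * system_highpri_wq (see workqueue_offline_cpu() below): it marks the
-  * CPU's pools POOL_DISASSOCIATED and all their workers WORKER_UNBOUND so
-  * concurrency management stops depending on that CPU.  rebind_workers()
-  * reverses this when the CPU comes back online.
-  */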
- static void wq_unbind_fn(struct work_struct *work)
- {
- int cpu = smp_processor_id();
- struct worker_pool *pool;
- struct worker *worker;
- for_each_cpu_worker_pool(pool, cpu) {
- mutex_lock(&pool->attach_mutex);
- spin_lock_irq(&pool->lock);
-
- for_each_pool_worker(worker, pool)
- worker->flags |= WORKER_UNBOUND;
- pool->flags |= POOL_DISASSOCIATED;
- spin_unlock_irq(&pool->lock);
- mutex_unlock(&pool->attach_mutex);
-
- schedule();
-
- atomic_set(&pool->nr_running, 0);
-
- spin_lock_irq(&pool->lock);
- wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
- }
- }
- static void rebind_workers(struct worker_pool *pool)
- {
- struct worker *worker;
- lockdep_assert_held(&pool->attach_mutex);
-
- for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
- pool->attrs->cpumask) < 0);
- spin_lock_irq(&pool->lock);
-
- if (!(pool->flags & POOL_DISASSOCIATED)) {
- spin_unlock_irq(&pool->lock);
- return;
- }
- pool->flags &= ~POOL_DISASSOCIATED;
- for_each_pool_worker(worker, pool) {
- unsigned int worker_flags = worker->flags;
-
- if (worker_flags & WORKER_IDLE)
- wake_up_process(worker->task);
-
- WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
- worker_flags |= WORKER_REBOUND;
- worker_flags &= ~WORKER_UNBOUND;
- ACCESS_ONCE(worker->flags) = worker_flags;
- }
- spin_unlock_irq(&pool->lock);
- }
- static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
- {
- static cpumask_t cpumask;
- struct worker *worker;
- lockdep_assert_held(&pool->attach_mutex);
-
- if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
- return;
- cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
-
- for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
- }
- int workqueue_prepare_cpu(unsigned int cpu)
- {
- struct worker_pool *pool;
- for_each_cpu_worker_pool(pool, cpu) {
- if (pool->nr_workers)
- continue;
- if (!create_worker(pool))
- return -ENOMEM;
- }
- return 0;
- }
- int workqueue_online_cpu(unsigned int cpu)
- {
- struct worker_pool *pool;
- struct workqueue_struct *wq;
- int pi;
- mutex_lock(&wq_pool_mutex);
- for_each_pool(pool, pi) {
- mutex_lock(&pool->attach_mutex);
- if (pool->cpu == cpu)
- rebind_workers(pool);
- else if (pool->cpu < 0)
- restore_unbound_workers_cpumask(pool, cpu);
- mutex_unlock(&pool->attach_mutex);
- }
-
- list_for_each_entry(wq, &workqueues, list)
- wq_update_unbound_numa(wq, cpu, true);
- mutex_unlock(&wq_pool_mutex);
- return 0;
- }
- int workqueue_offline_cpu(unsigned int cpu)
- {
- struct work_struct unbind_work;
- struct workqueue_struct *wq;
-
- INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
- queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
- mutex_lock(&wq_pool_mutex);
- list_for_each_entry(wq, &workqueues, list)
- wq_update_unbound_numa(wq, cpu, false);
- mutex_unlock(&wq_pool_mutex);
-
- flush_work(&unbind_work);
- destroy_work_on_stack(&unbind_work);
- return 0;
- }
- #ifdef CONFIG_SMP
- struct work_for_cpu {
- struct work_struct work;
- long (*fn)(void *);
- void *arg;
- long ret;
- };
- static void work_for_cpu_fn(struct work_struct *work)
- {
- struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
- wfc->ret = wfc->fn(wfc->arg);
- }
- long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
- {
- struct work_for_cpu wfc = { .fn = fn, .arg = arg };
- INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
- schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
- destroy_work_on_stack(&wfc.work);
- return wfc.ret;
- }
- EXPORT_SYMBOL_GPL(work_on_cpu);
- #endif
- #ifdef CONFIG_FREEZER
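- /*
-  * Freezer integration.  freeze_workqueues_begin() forces every
-  * freezable pwq's max_active to zero so no new work starts executing,
-  * freeze_workqueues_busy() reports whether freezable work is still in
-  * flight, and thaw_workqueues() restores max_active and wakes the pools
-  * back up.
-  */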
- void freeze_workqueues_begin(void)
- {
- struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
- mutex_lock(&wq_pool_mutex);
- WARN_ON_ONCE(workqueue_freezing);
- workqueue_freezing = true;
- list_for_each_entry(wq, &workqueues, list) {
- mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
- mutex_unlock(&wq->mutex);
- }
- mutex_unlock(&wq_pool_mutex);
- }
- bool freeze_workqueues_busy(void)
- {
- bool busy = false;
- struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
- mutex_lock(&wq_pool_mutex);
- WARN_ON_ONCE(!workqueue_freezing);
- list_for_each_entry(wq, &workqueues, list) {
- if (!(wq->flags & WQ_FREEZABLE))
- continue;
-
- rcu_read_lock_sched();
- for_each_pwq(pwq, wq) {
- WARN_ON_ONCE(pwq->nr_active < 0);
- if (pwq->nr_active) {
- busy = true;
- rcu_read_unlock_sched();
- goto out_unlock;
- }
- }
- rcu_read_unlock_sched();
- }
- out_unlock:
- mutex_unlock(&wq_pool_mutex);
- return busy;
- }
- void thaw_workqueues(void)
- {
- struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
- mutex_lock(&wq_pool_mutex);
- if (!workqueue_freezing)
- goto out_unlock;
- workqueue_freezing = false;
-
- list_for_each_entry(wq, &workqueues, list) {
- mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
- mutex_unlock(&wq->mutex);
- }
- out_unlock:
- mutex_unlock(&wq_pool_mutex);
- }
- #endif
- static int workqueue_apply_unbound_cpumask(void)
- {
- LIST_HEAD(ctxs);
- int ret = 0;
- struct workqueue_struct *wq;
- struct apply_wqattrs_ctx *ctx, *n;
- lockdep_assert_held(&wq_pool_mutex);
- list_for_each_entry(wq, &workqueues, list) {
- if (!(wq->flags & WQ_UNBOUND))
- continue;
-
- if (wq->flags & __WQ_ORDERED)
- continue;
- ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
- if (!ctx) {
- ret = -ENOMEM;
- break;
- }
- list_add_tail(&ctx->list, &ctxs);
- }
- list_for_each_entry_safe(ctx, n, &ctxs, list) {
- if (!ret)
- apply_wqattrs_commit(ctx);
- apply_wqattrs_cleanup(ctx);
- }
- return ret;
- }
- int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
- {
- int ret = -EINVAL;
- cpumask_var_t saved_cpumask;
- if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_and(cpumask, cpumask, cpu_possible_mask);
- if (!cpumask_empty(cpumask)) {
- apply_wqattrs_lock();
-
- cpumask_copy(saved_cpumask, wq_unbound_cpumask);
-
- cpumask_copy(wq_unbound_cpumask, cpumask);
- ret = workqueue_apply_unbound_cpumask();
-
- if (ret < 0)
- cpumask_copy(wq_unbound_cpumask, saved_cpumask);
- apply_wqattrs_unlock();
- }
- free_cpumask_var(saved_cpumask);
- return ret;
- }
- #ifdef CONFIG_SYSFS
- struct wq_device {
- struct workqueue_struct *wq;
- struct device dev;
- };
- static struct workqueue_struct *dev_to_wq(struct device *dev)
- {
- struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
- return wq_dev->wq;
- }
- static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
- }
- static DEVICE_ATTR_RO(per_cpu);
- static ssize_t max_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
- }
- static ssize_t max_active_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- int val;
- if (sscanf(buf, "%d", &val) != 1 || val <= 0)
- return -EINVAL;
- workqueue_set_max_active(wq, val);
- return count;
- }
- static DEVICE_ATTR_RW(max_active);
- static struct attribute *wq_sysfs_attrs[] = {
- &dev_attr_per_cpu.attr,
- &dev_attr_max_active.attr,
- NULL,
- };
- ATTRIBUTE_GROUPS(wq_sysfs);
- static ssize_t wq_pool_ids_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- const char *delim = "";
- int node, written = 0;
- rcu_read_lock_sched();
- for_each_node(node) {
- written += scnprintf(buf + written, PAGE_SIZE - written,
- "%s%d:%d", delim, node,
- unbound_pwq_by_node(wq, node)->pool->id);
- delim = " ";
- }
- written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
- rcu_read_unlock_sched();
- return written;
- }
- static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- int written;
- mutex_lock(&wq->mutex);
- written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
- mutex_unlock(&wq->mutex);
- return written;
- }
- static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
- {
- struct workqueue_attrs *attrs;
- lockdep_assert_held(&wq_pool_mutex);
- attrs = alloc_workqueue_attrs(GFP_KERNEL);
- if (!attrs)
- return NULL;
- copy_workqueue_attrs(attrs, wq->unbound_attrs);
- return attrs;
- }
- static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- struct workqueue_attrs *attrs;
- int ret = -ENOMEM;
- apply_wqattrs_lock();
- attrs = wq_sysfs_prep_attrs(wq);
- if (!attrs)
- goto out_unlock;
- if (sscanf(buf, "%d", &attrs->nice) == 1 &&
- attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
- ret = apply_workqueue_attrs_locked(wq, attrs);
- else
- ret = -EINVAL;
- out_unlock:
- apply_wqattrs_unlock();
- free_workqueue_attrs(attrs);
- return ret ?: count;
- }
- static ssize_t wq_cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- int written;
- mutex_lock(&wq->mutex);
- written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
- cpumask_pr_args(wq->unbound_attrs->cpumask));
- mutex_unlock(&wq->mutex);
- return written;
- }
- static ssize_t wq_cpumask_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- struct workqueue_attrs *attrs;
- int ret = -ENOMEM;
- apply_wqattrs_lock();
- attrs = wq_sysfs_prep_attrs(wq);
- if (!attrs)
- goto out_unlock;
- ret = cpumask_parse(buf, attrs->cpumask);
- if (!ret)
- ret = apply_workqueue_attrs_locked(wq, attrs);
- out_unlock:
- apply_wqattrs_unlock();
- free_workqueue_attrs(attrs);
- return ret ?: count;
- }
- static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- int written;
- mutex_lock(&wq->mutex);
- written = scnprintf(buf, PAGE_SIZE, "%d\n",
- !wq->unbound_attrs->no_numa);
- mutex_unlock(&wq->mutex);
- return written;
- }
- static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct workqueue_struct *wq = dev_to_wq(dev);
- struct workqueue_attrs *attrs;
- int v, ret = -ENOMEM;
- apply_wqattrs_lock();
- attrs = wq_sysfs_prep_attrs(wq);
- if (!attrs)
- goto out_unlock;
- ret = -EINVAL;
- if (sscanf(buf, "%d", &v) == 1) {
- attrs->no_numa = !v;
- ret = apply_workqueue_attrs_locked(wq, attrs);
- }
- out_unlock:
- apply_wqattrs_unlock();
- free_workqueue_attrs(attrs);
- return ret ?: count;
- }
- static struct device_attribute wq_sysfs_unbound_attrs[] = {
- __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
- __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
- __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
- __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
- __ATTR_NULL,
- };
- static struct bus_type wq_subsys = {
- .name = "workqueue",
- .dev_groups = wq_sysfs_groups,
- };
- static ssize_t wq_unbound_cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- int written;
- mutex_lock(&wq_pool_mutex);
- written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
- cpumask_pr_args(wq_unbound_cpumask));
- mutex_unlock(&wq_pool_mutex);
- return written;
- }
- static ssize_t wq_unbound_cpumask_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- cpumask_var_t cpumask;
- int ret;
- if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
- return -ENOMEM;
- ret = cpumask_parse(buf, cpumask);
- if (!ret)
- ret = workqueue_set_unbound_cpumask(cpumask);
- free_cpumask_var(cpumask);
- return ret ? ret : count;
- }
- static struct device_attribute wq_sysfs_cpumask_attr =
- __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
- wq_unbound_cpumask_store);
- static int __init wq_sysfs_init(void)
- {
- int err;
- err = subsys_virtual_register(&wq_subsys, NULL);
- if (err)
- return err;
- return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
- }
- core_initcall(wq_sysfs_init);
- static void wq_device_release(struct device *dev)
- {
- struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
- kfree(wq_dev);
- }
- int workqueue_sysfs_register(struct workqueue_struct *wq)
- {
- struct wq_device *wq_dev;
- int ret;
-
- if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
- return -EINVAL;
- wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
- if (!wq_dev)
- return -ENOMEM;
- wq_dev->wq = wq;
- wq_dev->dev.bus = &wq_subsys;
- wq_dev->dev.release = wq_device_release;
- dev_set_name(&wq_dev->dev, "%s", wq->name);
-
- dev_set_uevent_suppress(&wq_dev->dev, true);
- ret = device_register(&wq_dev->dev);
- if (ret) {
- kfree(wq_dev);
- wq->wq_dev = NULL;
- return ret;
- }
- if (wq->flags & WQ_UNBOUND) {
- struct device_attribute *attr;
- for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
- ret = device_create_file(&wq_dev->dev, attr);
- if (ret) {
- device_unregister(&wq_dev->dev);
- wq->wq_dev = NULL;
- return ret;
- }
- }
- }
- dev_set_uevent_suppress(&wq_dev->dev, false);
- kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
- return 0;
- }
- static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
- {
- struct wq_device *wq_dev = wq->wq_dev;
- if (!wq->wq_dev)
- return;
- wq->wq_dev = NULL;
- device_unregister(&wq_dev->dev);
- }
- #else
- static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
- #endif
- #ifdef CONFIG_WQ_WATCHDOG
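- /*
-  * Workqueue lockup watchdog.  A deferrable timer fires roughly every
-  * wq_watchdog_thresh seconds and checks each pool's last-progress
-  * timestamp (watchdog_ts) against the global and per-CPU touch
-  * timestamps.  A pool with pending work that hasn't progressed within
-  * the threshold triggers a "workqueue lockup" report and a dump of the
-  * full workqueue state.
-  */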
- static void wq_watchdog_timer_fn(unsigned long data);
- static unsigned long wq_watchdog_thresh = 30;
- static struct timer_list wq_watchdog_timer =
- TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
- static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
- static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
- static void wq_watchdog_reset_touched(void)
- {
- int cpu;
- wq_watchdog_touched = jiffies;
- for_each_possible_cpu(cpu)
- per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
- }
- static void wq_watchdog_timer_fn(unsigned long data)
- {
- unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
- bool lockup_detected = false;
- struct worker_pool *pool;
- int pi;
- if (!thresh)
- return;
- rcu_read_lock();
- for_each_pool(pool, pi) {
- unsigned long pool_ts, touched, ts;
- if (list_empty(&pool->worklist))
- continue;
-
- pool_ts = READ_ONCE(pool->watchdog_ts);
- touched = READ_ONCE(wq_watchdog_touched);
- if (time_after(pool_ts, touched))
- ts = pool_ts;
- else
- ts = touched;
- if (pool->cpu >= 0) {
- unsigned long cpu_touched =
- READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
- pool->cpu));
- if (time_after(cpu_touched, ts))
- ts = cpu_touched;
- }
-
- if (time_after(jiffies, ts + thresh)) {
- lockup_detected = true;
- pr_emerg("BUG: workqueue lockup - pool");
- pr_cont_pool_info(pool);
- pr_cont(" stuck for %us!\n",
- jiffies_to_msecs(jiffies - pool_ts) / 1000);
- }
- }
- rcu_read_unlock();
- if (lockup_detected)
- show_workqueue_state();
- wq_watchdog_reset_touched();
- mod_timer(&wq_watchdog_timer, jiffies + thresh);
- }
- void wq_watchdog_touch(int cpu)
- {
- if (cpu >= 0)
- per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
- else
- wq_watchdog_touched = jiffies;
- }
- static void wq_watchdog_set_thresh(unsigned long thresh)
- {
- wq_watchdog_thresh = 0;
- del_timer_sync(&wq_watchdog_timer);
- if (thresh) {
- wq_watchdog_thresh = thresh;
- wq_watchdog_reset_touched();
- mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
- }
- }
- static int wq_watchdog_param_set_thresh(const char *val,
- const struct kernel_param *kp)
- {
- unsigned long thresh;
- int ret;
- ret = kstrtoul(val, 0, &thresh);
- if (ret)
- return ret;
- if (system_wq)
- wq_watchdog_set_thresh(thresh);
- else
- wq_watchdog_thresh = thresh;
- return 0;
- }
- static const struct kernel_param_ops wq_watchdog_thresh_ops = {
- .set = wq_watchdog_param_set_thresh,
- .get = param_get_ulong,
- };
- module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
- 0644);
- static void wq_watchdog_init(void)
- {
- wq_watchdog_set_thresh(wq_watchdog_thresh);
- }
- #else
- static inline void wq_watchdog_init(void) { }
- #endif
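- /*
-  * wq_numa_init - build the node -> possible-CPUs table used when
-  * choosing per-node pwqs for unbound workqueues.  NUMA handling stays
-  * disabled on single-node systems, when wq_disable_numa is set, or when
-  * a possible CPU has no node mapping.
-  */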
- static void __init wq_numa_init(void)
- {
- cpumask_var_t *tbl;
- int node, cpu;
- if (num_possible_nodes() <= 1)
- return;
- if (wq_disable_numa) {
- pr_info("workqueue: NUMA affinity support disabled\n");
- return;
- }
- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
- BUG_ON(!wq_update_unbound_numa_attrs_buf);
-
- tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
- BUG_ON(!tbl);
- for_each_node(node)
- BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
- node_online(node) ? node : NUMA_NO_NODE));
- for_each_possible_cpu(cpu) {
- node = cpu_to_node(cpu);
- if (WARN_ON(node == NUMA_NO_NODE)) {
- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
-
- return;
- }
- cpumask_set_cpu(cpu, tbl[node]);
- }
- wq_numa_possible_cpumask = tbl;
- wq_numa_enabled = true;
- }
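- /*
-  * init_workqueues - early initialization of the workqueue subsystem
-  *
-  * Sets up the per-CPU standard and highpri worker pools, creates the
-  * initial workers for online CPUs, prepares the canonical unbound and
-  * ordered attrs, and allocates the system-wide workqueues (system_wq,
-  * system_highpri_wq, system_long_wq, ...).  Registered as an
-  * early_initcall.
-  */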
- static int __init init_workqueues(void)
- {
- int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
- int i, cpu;
- WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
- BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
- cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
- pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
- wq_numa_init();
-
- for_each_possible_cpu(cpu) {
- struct worker_pool *pool;
- i = 0;
- for_each_cpu_worker_pool(pool, cpu) {
- BUG_ON(init_worker_pool(pool));
- pool->cpu = cpu;
- cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
- pool->attrs->nice = std_nice[i++];
- pool->node = cpu_to_node(cpu);
-
- mutex_lock(&wq_pool_mutex);
- BUG_ON(worker_pool_assign_id(pool));
- mutex_unlock(&wq_pool_mutex);
- }
- }
-
- for_each_online_cpu(cpu) {
- struct worker_pool *pool;
- for_each_cpu_worker_pool(pool, cpu) {
- pool->flags &= ~POOL_DISASSOCIATED;
- BUG_ON(!create_worker(pool));
- }
- }
-
- for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
- struct workqueue_attrs *attrs;
- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
- attrs->nice = std_nice[i];
- unbound_std_wq_attrs[i] = attrs;
-
- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
- attrs->nice = std_nice[i];
- attrs->no_numa = true;
- ordered_wq_attrs[i] = attrs;
- }
- system_wq = alloc_workqueue("events", 0, 0);
- system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
- system_long_wq = alloc_workqueue("events_long", 0, 0);
- system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
- WQ_UNBOUND_MAX_ACTIVE);
- system_freezable_wq = alloc_workqueue("events_freezable",
- WQ_FREEZABLE, 0);
- system_power_efficient_wq = alloc_workqueue("events_power_efficient",
- WQ_POWER_EFFICIENT, 0);
- system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
- WQ_FREEZABLE | WQ_POWER_EFFICIENT,
- 0);
- BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
- !system_unbound_wq || !system_freezable_wq ||
- !system_power_efficient_wq ||
- !system_freezable_power_efficient_wq);
- wq_watchdog_init();
- return 0;
- }
- early_initcall(init_workqueues);
|