netdevice.h

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
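
/*
 * Illustrative sketch (not part of this header): tunnel-style drivers often
 * hand a packet down the stack and fold the qdisc return code into their
 * stats with net_xmit_eval(), so a congestion notification is not counted
 * as a drop. The function name here is hypothetical.
 */
#if 0
static void example_tunnel_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	int err;

	err = dev_queue_xmit(skb);	/* may return a NET_XMIT_* code */
	if (net_xmit_eval(err))		/* NET_XMIT_CN is treated as success */
		dev->stats.tx_errors++;
	else
		dev->stats.tx_packets++;
}
#endif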
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
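
/*
 * Illustrative sketch (not part of this header): a caller invoking a
 * driver's ndo_start_xmit() can use dev_xmit_complete() to decide whether
 * the skb is now owned by the driver or must be requeued. This hypothetical
 * helper elides the locking and queue handling a real caller needs.
 */
#if 0
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t rc;

	rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
	if (dev_xmit_complete(rc))
		return 0;	/* skb consumed, possibly with an error */

	/* NETDEV_TX_BUSY: the skb is still ours, retry later */
	return -EBUSY;
}
#endif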
/*
 * Compute the worst-case header length according to the protocols
 * used.
 */
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
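
/*
 * Illustrative sketch (not part of this header): a driver's
 * ndo_set_rx_mode() callback typically walks the device's multicast list
 * with netdev_for_each_mc_addr() to reprogram its hardware filter.
 * The example_hw_*() helpers are hypothetical device-specific functions.
 */
#if 0
static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	example_hw_clear_mc_filter(dev);
	netdev_for_each_mc_addr(ha, dev)
		example_hw_add_mc_filter(dev, ha->addr);
}
#endif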
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
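
/*
 * Illustrative sketch (not part of this header): code that builds an
 * outgoing packet reserves LL_RESERVED_SPACE(dev) of headroom so the
 * link-layer header can later be pushed without reallocating the skb.
 * The helper name is hypothetical.
 */
#if 0
static struct sk_buff *example_alloc_tx_skb(struct net_device *dev,
					    unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for LL header */
	skb->dev = dev;
	return skb;
}
#endif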
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
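
/*
 * Illustrative sketch (not part of this header): a minimal rx_handler that
 * counts frames and lets delivery proceed unchanged. A real user (bridge,
 * bonding, macvlan, ...) registers it with netdev_rx_handler_register(),
 * called under RTNL. struct example_port and its field are hypothetical.
 */
#if 0
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	port->rx_frames++;		/* hypothetical private state */
	return RX_HANDLER_PASS;		/* deliver normally */
}

/* Registration, e.g. when attaching a port (rtnl_lock() held):
 *	err = netdev_rx_handler_register(dev, example_rx_handler, port);
 */
#endif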
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_schedule_prep - check if NAPI can be scheduled
 *	@n: NAPI context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);

/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 */
static inline void napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
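
/*
 * Illustrative sketch (not part of this header): the canonical NAPI flow.
 * The device interrupt masks further RX interrupts and schedules NAPI;
 * the poll callback then processes up to @budget packets and, once done,
 * calls napi_complete_done() and re-enables interrupts. All example_*
 * names and struct example_priv are hypothetical.
 */
#if 0
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	example_hw_disable_rx_irq(priv);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done;

	work_done = example_rx_clean(priv, budget);
	if (work_done < budget) {
		/* ring drained below budget: stop polling, unmask irq */
		napi_complete_done(napi, work_done);
		example_hw_enable_rx_irq(priv);
	}
	return work_done;
}
#endif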
/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: NAPI context
 *
 * Generate a new napi_id and store a @napi under it in napi_hash.
 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
 * Note: This is normally automatically done from netif_napi_add(),
 * so might disappear in a future Linux version.
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: NAPI context
 *
 * Warning: caller must observe RCU grace period
 * before freeing memory containing @napi, if
 * this function returns true.
 * Note: core networking stack automatically calls it
 * from netif_napi_del().
 * Drivers might want to call this helper to combine all
 * the needed RCU grace periods into a single one.
 */
bool napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable().
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit_*stopped functions, they should only be using netif_tx_*.
 */
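
/*
 * Illustrative sketch (not part of this header): the usual driver-side
 * flow control. The xmit path stops its queue when the TX ring is nearly
 * full, and the completion path wakes it once descriptors are reclaimed;
 * netif_stop_subqueue()/netif_wake_subqueue() (declared later in this
 * header) set and clear __QUEUE_STATE_DRV_XOFF. All example_* names and
 * EXAMPLE_WAKE_THRESH are hypothetical.
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	u16 q = skb_get_queue_mapping(skb);

	example_post_descriptor(priv, q, skb);
	if (example_ring_free(priv, q) < MAX_SKB_FRAGS + 1)
		netif_stop_subqueue(dev, q);	/* driver-side XOFF */
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct example_priv *priv, u16 q)
{
	example_reclaim_descriptors(priv, q);
	if (example_ring_free(priv, q) > EXAMPLE_WAKE_THRESH)
		netif_wake_subqueue(priv->dev, q);
}
#endif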
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}
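
/*
 * Illustrative sketch (not part of this header): how a stored entry is
 * interpreted on the RX side. The upper bits must match the flow hash
 * (to detect collisions), and the low bits, masked by rps_cpu_mask, give
 * the CPU hint. This mirrors, in simplified hypothetical form, what
 * get_rps_cpu() does in net/core/dev.c.
 */
#if 0
static int example_lookup_flow_cpu(const struct rps_sock_flow_table *table,
				   u32 hash)
{
	u32 ent = READ_ONCE(table->ents[hash & table->mask]);

	/* The entry is usable only if its hash bits match this flow */
	if ((ent ^ hash) & ~rps_cpu_mask)
		return -1;		/* no usable hint */

	return ent & rps_cpu_mask;	/* CPU that last ran recvmsg() */
}
#endif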
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/* These structures hold the attributes of qdisc and classifiers
 * that are being passed to the netdevice through the setup_tc op.
 */
enum {
	TC_SETUP_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_MATCHALL,
	TC_SETUP_CLSBPF,
};

struct tc_cls_u32_offload;

struct tc_to_netdev {
	unsigned int type;
	union {
		u8 tc;
		struct tc_cls_u32_offload *cls_u32;
		struct tc_cls_flower_offload *cls_flower;
		struct tc_cls_matchall_offload *cls_mall;
		struct tc_cls_bpf_offload *cls_bpf;
	};
};
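
/*
 * Illustrative sketch (not part of this header): a driver's ndo_setup_tc()
 * dispatching on tc->type. Only the mqprio case is handled here, and the
 * hardware-programming helper is hypothetical. In this kernel generation
 * the callback takes (dev, handle, protocol, struct tc_to_netdev *).
 */
#if 0
static int example_setup_tc(struct net_device *dev, u32 handle,
			    __be16 protocol, struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	if (tc->tc) {
		netdev_set_num_tc(dev, tc->tc);
		return example_hw_config_tcs(dev, tc->tc);
	}

	netdev_reset_tc(dev);	/* zero traffic classes: tear down */
	return 0;
}
#endif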
/* These structures hold the attributes of xdp state that are being passed
 * to the netdevice through the xdp op.
 */
enum xdp_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	/* Check if a bpf program is set on the device. The callee should
	 * return true if a program is currently attached and running.
	 */
	XDP_QUERY_PROG,
};

struct netdev_xdp {
	enum xdp_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct bpf_prog *prog;
		/* XDP_QUERY_PROG */
		bool prog_attached;
	};
};
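
/*
 * Illustrative sketch (not part of this header): a driver's ndo_xdp()
 * callback handling both commands. Per the XDP_SETUP_PROG contract above,
 * the driver takes ownership of the new prog and must bpf_prog_put() the
 * old one. A real driver must also synchronize the swap with its datapath
 * (e.g. via RCU); that is elided here. example_* names are hypothetical.
 */
#if 0
static int example_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct example_priv *priv = netdev_priv(dev);
	struct bpf_prog *old;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		old = priv->xdp_prog;
		priv->xdp_prog = xdp->prog;	/* take ownership of new prog */
		if (old)
			bpf_prog_put(old);	/* release the old one */
		return 0;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}
#endif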
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when device is unregistered or when registration
 *     fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when a network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when a network device transitions to the down
 *     state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *     Called when a packet needs to be transmitted.
 *     Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *     the queue before that can happen; it's for obsolete devices and weird
 *     corner cases, but the stack really does a non-trivial amount
 *     of useless work if you return NETDEV_TX_BUSY.
 *     Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *                                         struct net_device *dev,
 *                                         netdev_features_t features);
 *     Called by core transmit path to determine if device is capable of
 *     performing offload operations on a given packet. This is to give
 *     the device an opportunity to implement any restrictions that cannot
 *     be otherwise expressed by feature flags. The check is called with
 *     the set of features that the stack has calculated and it returns
 *     those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *     Called to decide which queue to use when device supports multiple
 *     transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *     This function is called to allow device receiver to make
 *     changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *     This function is called when device changes address list filtering.
 *     If driver handles unicast address filtering, it should set
 *     IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *     This function is called when the Media Access Control address
 *     needs to be changed. If this interface is not defined, the
 *     MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *     Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *     Called when a user requests an ioctl which can't be handled by
 *     the generic interface code. If not defined, ioctls return a
 *     not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *     Used to set a network device's bus interface parameters. This interface
 *     is retained for legacy reasons; new devices should use the bus
 *     interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *     Called when a user wants to change the Maximum Transfer Unit
 *     of a device. If not defined, any request to change MTU will
 *     return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *     Callback used when the transmitter has not made any progress
 *     for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *     Called when a user wants to get the network device usage
 *     statistics. Drivers must do one of the following:
 *     1. Define @ndo_get_stats64 to fill in a zero-initialised
 *        rtnl_link_stats64 structure passed by the caller.
 *     2. Define @ndo_get_stats to update a net_device_stats structure
 *        (which should normally be dev->stats) and return a pointer to
 *        it. The structure may be changed asynchronously only if each
 *        field is written atomically.
 *     3. Update dev->stats asynchronously and atomically, and define
 *        neither operation.
 *
 * bool (*ndo_has_offload_stats)(int attr_id)
 *     Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *                              void *attr_data)
 *     Get statistics for offload operations by attr_id. Write it into the
 *     attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *     If device supports VLAN filtering this function is called when a
 *     VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *     If device supports VLAN filtering this function is called when a
 *     VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *     SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *                        u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *                        int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 *
 *     Enable or disable the VF ability to query its RSS Redirection Table and
 *     Hash Key. This is needed since on some devices VFs share this information
 *     with the PF and querying it may introduce a theoretical security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *     Called to setup 'tc' number of traffic classes in the net device. This
 *     is always called from the stack with the rtnl lock held and netif tx
 *     queues stopped. This allows the netdevice to perform queue management
 *     safely.
 *
 *     Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *     Called when the FCoE protocol stack wants to start using LLD for FCoE
 *     so the underlying device can perform whatever needed configuration or
 *     initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *     Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *     so the underlying device can perform whatever needed clean-ups to
 *     stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *     Called when the FCoE Initiator wants to initialize an I/O that
 *     is a possible candidate for Direct Data Placement (DDP). The LLD can
 *     perform necessary setup and returns 1 to indicate the device is set up
 *     successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *     Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *     indicated by the FC exchange id 'xid', so the underlying device can
 *     clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *     Called when the FCoE Target wants to initialize an I/O that
 *     is a possible candidate for Direct Data Placement (DDP). The LLD can
 *     perform necessary setup and returns 1 to indicate the device is set up
 *     successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *     Called when the FCoE Protocol stack wants information on the underlying
 *     device. This information is utilized by the FCoE protocol stack to
 *     register attributes with Fiber Channel management service as per the
 *     FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *     Called when the underlying device wants to override default World Wide
 *     Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *     World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *     protocol stack to use.
 *
 *     RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *     Set hardware filter for RFS. rxq_index is the target queue index;
 *     flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *     Return the filter ID on success, or a negative error code.
 *
 *     Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *     Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *     Called to release previously enslaved netdev.
 *
 *     Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *                                       netdev_features_t features);
 *     Adjusts the requested feature flags according to device-specific
 *     constraints, and returns the resulting flags. Must not modify
 *     the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *     Called to update device configuration to new features. Passed
 *     feature set might be less than what was returned by ndo_fix_features().
 *     Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid, u16 flags)
 *     Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 vid)
 *     Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, struct net_device *filter_dev,
 *                     int *idx)
 *     Used to add FDB entries to dump requests. Implementers should add
 *     entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *                           struct net_device *dev, u32 filter_mask,
 *                           int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *                           u16 flags);
 *
  976. * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  977. * Called to change device carrier. Soft-devices (like dummy, team, etc)
  978. * which do not represent real hardware may define this to allow their
  979. * userspace components to manage their virtual carrier state. Devices
  980. * that determine carrier state from physical hardware properties (eg
  981. * network cables) or protocol-dependent mechanisms (eg
  982. * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
  983. *
  984. * int (*ndo_get_phys_port_id)(struct net_device *dev,
  985. * struct netdev_phys_item_id *ppid);
  986. * Called to get ID of physical port of this device. If driver does
  987. * not implement this, it is assumed that the hw is not able to have
  988. * multiple net devices on single physical port.
  989. *
  990. * void (*ndo_udp_tunnel_add)(struct net_device *dev,
  991. * struct udp_tunnel_info *ti);
  992. * Called by UDP tunnel to notify a driver about the UDP port and socket
  993. * address family that a UDP tunnel is listnening to. It is called only
  994. * when a new port starts listening. The operation is protected by the
  995. * RTNL.
  996. *
  997. * void (*ndo_udp_tunnel_del)(struct net_device *dev,
  998. * struct udp_tunnel_info *ti);
  999. * Called by UDP tunnel to notify the driver about a UDP port and socket
  1000. * address family that the UDP tunnel is not listening to anymore. The
  1001. * operation is protected by the RTNL.
  1002. *
  1003. * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
  1004. * struct net_device *dev)
  1005. * Called by upper layer devices to accelerate switching or other
  1006. * station functionality into hardware. 'pdev is the lowerdev
  1007. * to use for the offload and 'dev' is the net device that will
  1008. * back the offload. Returns a pointer to the private structure
  1009. * the upper layer will maintain.
  1010. * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
  1011. * Called by upper layer device to delete the station created
  1012. * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
  1013. * the station and priv is the structure returned by the add
  1014. * operation.
  1015. * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
  1016. * struct net_device *dev,
  1017. * void *priv);
  1018. * Callback to use for xmit over the accelerated station. This
  1019. * is used in place of ndo_start_xmit on accelerated net
  1020. * devices.
  1021. * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  1022. * int queue_index, u32 maxrate);
  1023. * Called when a user wants to set a max-rate limitation of specific
  1024. * TX queue.
  1025. * int (*ndo_get_iflink)(const struct net_device *dev);
  1026. * Called to get the iflink value of this device.
  1027. * void (*ndo_change_proto_down)(struct net_device *dev,
  1028. * bool proto_down);
  1029. * This function is used to pass protocol port error state information
  1030. * to the switch driver. The switch driver can react to the proto_down
  1031. * by doing a phys down on the associated switch port.
  1032. * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
  1033. * This function is used to get egress tunnel information for given skb.
  1034. * This is useful for retrieving outer tunnel header parameters while
  1035. * sampling packet.
  1036. * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
  1037. * This function is used to specify the headroom that the skb must
  1038. * consider when allocation skb during packet reception. Setting
  1039. * appropriate rx headroom value allows avoiding skb head copy on
  1040. * forward. Setting a negative value resets the rx headroom to the
  1041. * default value.
  1042. * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
  1043. * This function is used to set or query state related to XDP on the
  1044. * netdevice. See definition of enum xdp_netdev_command for details.
  1045. *
  1046. */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(struct net_device *dev,
							   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						u32 handle,
						__be16 protocol,
						struct tc_to_netdev *tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);
	netdev_tx_t		(*ndo_dfwd_start_xmit)(struct sk_buff *skb,
						       struct net_device *dev,
						       void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_xdp)(struct net_device *dev,
					   struct netdev_xdp *xdp);
};
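
/* Example (illustrative sketch, guarded out of compilation): a minimal
 * driver fills in only the callbacks it needs and leaves the rest NULL.
 * The foo_*() names below are hypothetical.
 */
#if 0 /* example only */
static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real driver would hand the skb to its hardware queue here */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,
};
#endif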
/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set
 * internally by drivers and used in the kernel. These flags are
 * invisible to userspace; this means that the order of these flags
 * can change during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_IPVLAN_MASTER: IPvlan master device
 * @IFF_IPVLAN_SLAVE: IPvlan slave device
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_IPVLAN_MASTER		= 1<<18,
	IFF_IPVLAN_SLAVE		= 1<<19,
	IFF_L3MDEV_MASTER		= 1<<20,
	IFF_NO_QUEUE			= 1<<21,
	IFF_OPENVSWITCH			= 1<<22,
	IFF_L3MDEV_SLAVE		= 1<<23,
	IFF_TEAM			= 1<<24,
	IFF_RXFH_CONFIGURED		= 1<<25,
	IFF_PHONY_HEADROOM		= 1<<26,
	IFF_MACSEC			= 1<<27,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_MACSEC			IFF_MACSEC
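
/* Example (illustrative sketch): priv_flags are tested with plain bit
 * operations; a hypothetical helper checking whether a device is used
 * as a bridge port:
 */
#if 0 /* example only */
static inline bool foo_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}
#endif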
/**
 * struct net_device - The DEVICE structure.
 *	Actually, this whole structure is a big mistake. It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 * @name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file). It is the name
 *		of the interface.
 *
 * @name_hlist:	Device name hash chain, please keep it close to name[]
 * @ifalias:	SNMP alias
 * @mem_end:	Shared memory end
 * @mem_start:	Shared memory start
 * @base_addr:	Device I/O address
 * @irq:	Device IRQ number
 *
 * @carrier_changes:	Stats to monitor carrier on<->off transitions
 *
 * @state:	Generic network queuing layer state, see netdev_state_t
 * @dev_list:	The global list of network devices
 * @napi_list:	List entry used for polling NAPI devices
 * @unreg_list:	List entry used when we are unregistering the
 *		device; see the function unregister_netdev
 * @close_list:	List entry used when we are closing the device
 * @ptype_all:	Device-specific packet handlers for all protocols
 * @ptype_specific: Device-specific, protocol-specific packet handlers
 *
 * @adj_list:	Directly linked devices, like slaves for bonding
 * @all_adj_list: All linked devices, *including* neighbours
 * @features:	Currently active device features
 * @hw_features: User-changeable features
 *
 * @wanted_features: User-requested features
 * @vlan_features: Mask of features inheritable by VLAN devices
 *
 * @hw_enc_features: Mask of features inherited by encapsulating devices.
 *		This field indicates what encapsulation
 *		offloads the hardware is capable of doing,
 *		and drivers will need to set them appropriately.
 *
 * @mpls_features: Mask of features inheritable by MPLS
 *
 * @ifindex:	interface index
 * @group:	The group the device belongs to
 *
 * @stats:	Statistics struct, which was left as a legacy; use
 *		rtnl_link_stats64 instead
 *
 * @rx_dropped:	Dropped packets by core network,
 *		do not use this in drivers
 * @tx_dropped:	Dropped packets by core network,
 *		do not use this in drivers
 * @rx_nohandler: nohandler dropped packets by core network on
 *		inactive devices, do not use this in drivers
 *
 * @wireless_handlers:	List of functions to handle Wireless Extensions,
 *			instead of ioctl,
 *			see <net/iw_handler.h> for details.
 * @wireless_data:	Instance data managed by the core of wireless extensions
 *
 * @netdev_ops:	Includes several pointers to callbacks,
 *		if one wants to override the ndo_*() functions
 * @ethtool_ops:	Management operations
 * @ndisc_ops:	Includes callbacks for different IPv6 neighbour
 *		discovery handling. Necessary for e.g. 6LoWPAN.
 * @header_ops:	Includes callbacks for creating, parsing, caching, etc.
 *		of Layer 2 headers.
 *
 * @flags:	Interface flags (a la BSD)
 * @priv_flags:	Like 'flags' but invisible to userspace,
 *		see if.h for the definitions
 * @gflags:	Global flags (kept as legacy)
 * @padded:	How much padding added by alloc_netdev()
 * @operstate:	RFC2863 operstate
 * @link_mode:	Mapping policy to operstate
 * @if_port:	Selectable AUI, TP, ...
 * @dma:	DMA channel
 * @mtu:	Interface MTU value
 * @type:	Interface hardware type
 * @hard_header_len: Maximum hardware header length.
 * @min_header_len: Minimum hardware header length
 *
 * @needed_headroom: Extra headroom the hardware may need, but not in all
 *		     cases can this be guaranteed
 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
 *		     cases can this be guaranteed. Some cases also use
 *		     LL_MAX_HEADER instead to allocate the skb
 *
 * interface address info:
 *
 * @perm_addr:		Permanent hw address
 * @addr_assign_type:	Hw address assignment type
 * @addr_len:		Hardware address length
 * @neigh_priv_len:	Used in neigh_alloc()
 * @dev_id:		Used to differentiate devices that share
 *			the same link layer address
 * @dev_port:		Used to differentiate devices that share
 *			the same function
 * @addr_list_lock:	XXX: need comments on this one
 * @uc_promisc:		Counter that indicates promiscuous mode
 *			has been enabled due to the need to listen to
 *			additional unicast addresses in a device that
 *			does not implement ndo_set_rx_mode()
 * @uc:			unicast mac addresses
 * @mc:			multicast mac addresses
 * @dev_addrs:		list of device hw addresses
 * @queues_kset:	Group of all Kobjects in the Tx and RX queues
 * @promiscuity:	Number of times the NIC is told to work in
 *			promiscuous mode; if it becomes 0 the NIC will
 *			exit promiscuous mode
 * @allmulti:		Counter, enables or disables allmulticast mode
 *
 * @vlan_info:	VLAN info
 * @dsa_ptr:	dsa specific data
 * @tipc_ptr:	TIPC specific data
 * @atalk_ptr:	AppleTalk link
 * @ip_ptr:	IPv4 specific data
 * @dn_ptr:	DECnet specific data
 * @ip6_ptr:	IPv6 specific data
 * @ax25_ptr:	AX.25 specific data
 * @ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
 *
 * @last_rx:	Time of last Rx
 * @dev_addr:	Hw address (before bcast,
 *		because most packets are unicast)
 *
 * @_rx:	Array of RX queues
 * @num_rx_queues:	Number of RX queues
 *			allocated at register_netdev() time
 * @real_num_rx_queues:	Number of RX queues currently active in device
 *
 * @rx_handler:	handler for received packets
 * @rx_handler_data:	XXX: need comments on this one
 * @ingress_queue:	XXX: need comments on this one
 * @broadcast:	hw bcast address
 *
 * @rx_cpu_rmap:	CPU reverse-mapping for RX completion interrupts,
 *			indexed by RX queue number. Assigned by driver.
 *			This must only be set if the ndo_rx_flow_steer
 *			operation is defined
 * @index_hlist:	Device index hash chain
 *
 * @_tx:	Array of TX queues
 * @num_tx_queues:	Number of TX queues allocated at alloc_netdev_mq() time
 * @real_num_tx_queues:	Number of TX queues currently active in device
 * @qdisc:	Root qdisc from userspace point of view
 * @tx_queue_len:	Max frames per queue allowed
 * @tx_global_lock:	XXX: need comments on this one
 *
 * @xps_maps:	XXX: need comments on this one
 *
 * @watchdog_timeo:	Represents the timeout that is used by
 *			the watchdog (see dev_watchdog())
 * @watchdog_timer:	List of timers
 *
 * @pcpu_refcnt:	Number of references to this device
 * @todo_list:	Delayed register/unregister
 * @link_watch_list:	XXX: need comments on this one
 *
 * @reg_state:	Register/unregister state machine
 * @dismantle:	Device is going to be freed
 * @rtnl_link_state:	This enum represents the phases of creating
 *			a new link
 *
 * @destructor:	Called from unregister;
 *		can be used to call free_netdev
 * @npinfo:	XXX: need comments on this one
 * @nd_net:	Network namespace this network device is inside
 *
 * @ml_priv:	Mid-layer private
 * @lstats:	Loopback statistics
 * @tstats:	Tunnel statistics
 * @dstats:	Dummy statistics
 * @vstats:	Virtual ethernet statistics
 *
 * @garp_port:	GARP
 * @mrp_port:	MRP
 *
 * @dev:	Class/net/name entry
 * @sysfs_groups:	Space for optional device, statistics and wireless
 *			sysfs groups
 *
 * @sysfs_rx_queue_group:	Space for optional per-rx queue attributes
 * @rtnl_link_ops:	Rtnl_link_ops
 *
 * @gso_max_size:	Maximum size of generic segmentation offload
 * @gso_max_segs:	Maximum number of segments that can be passed to the
 *			NIC for GSO
 *
 * @dcbnl_ops:	Data Center Bridging netlink ops
 * @num_tc:	Number of traffic classes in the net device
 * @tc_to_txq:	XXX: need comments on this one
 * @prio_tc_map:	XXX: need comments on this one
 *
 * @fcoe_ddp_xid:	Max exchange id for FCoE LRO by ddp
 *
 * @priomap:	XXX: need comments on this one
 * @phydev:	Physical device may attach itself
 *		for hardware timestamping
 *
 * @qdisc_tx_busylock:	lockdep class annotating Qdisc->busylock spinlock
 * @qdisc_running_key:	lockdep class annotating Qdisc->running seqcount
 *
 * @proto_down:	protocol port state information can be sent to the
 *		switch driver and used to set the phys state of the
 *		switch port.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */
struct net_device {
	char			name[IFNAMSIZ];
	struct hlist_node	name_hlist;
	char			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	atomic_t		carrier_changes;

	/*
	 *	Some hardware also needs these fields (state,dev_list,
	 *	napi_list,unreg_list,close_list) but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;
	netdev_features_t	gso_partial_features;

	int			ifindex;
	int			group;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;
	atomic_long_t		rx_nohandler;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
	const struct switchdev_ops *switchdev_ops;
#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops *ndisc_ops;
#endif

	const struct header_ops *header_ops;

	unsigned int		flags;
	unsigned int		priv_flags;

	unsigned short		gflags;
	unsigned short		padded;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned int		mtu;
	unsigned short		type;
	unsigned short		hard_header_len;
	unsigned short		min_header_len;

	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	spinlock_t		addr_list_lock;
	unsigned char		name_assign_type;
	bool			uc_promisc;
	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;

	/* Protocol-specific pointers */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
	void			*atalk_ptr;
	struct in_device __rcu	*ip_ptr;
	struct dn_dev __rcu	*dn_ptr;
	struct inet6_dev __rcu	*ip6_ptr;
	void			*ax25_ptr;
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;

#ifdef CONFIG_SYSFS
	struct netdev_rx_queue	*_rx;

	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;
#endif

	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto __rcu	*ingress_cl_list;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE	(qdisc_hash, 4);
#endif
	unsigned long		tx_queue_len;
	spinlock_t		tx_global_lock;
	int			watchdog_timeo;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto __rcu	*egress_cl_list;
#endif

	/* These may be needed for future network-power-down code. */
	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;
	struct list_head	todo_list;

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;

	/* mid-layer private */
	union {
		void					*ml_priv;
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
		struct pcpu_vstats __percpu		*vstats;
	};

	struct garp_port __rcu	*garp_port;
	struct mrp_port __rcu	*mrp_port;

	struct device		dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8			num_tc;
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;
	struct lock_class_key	*qdisc_tx_busylock;
	struct lock_class_key	*qdisc_running_key;
	bool			proto_down;
};
#define	to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
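
/* Example (illustrative sketch): a device with eight TX queues exposing two
 * traffic classes of four queues each; the layout below is hypothetical.
 */
#if 0 /* example only */
static int foo_setup_two_tcs(struct net_device *dev)
{
	int prio, err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC 0: queues 0..3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC 1: queues 4..7 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
	return 0;
}
#endif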
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
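
/* Example (illustrative sketch): netdev_for_each_tx_queue() applies a
 * callback to every TX queue; here the hypothetical callback just counts
 * queues via the opaque arg pointer.
 */
#if 0 /* example only */
static void foo_count_one_txq(struct net_device *dev,
			      struct netdev_queue *txq, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
}

static unsigned int foo_count_txqs(struct net_device *dev)
{
	unsigned int count = 0;

	netdev_for_each_tx_queue(dev, foo_count_one_txq, &count);
	return count;
}
#endif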
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_running_key;		\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	(dev)->qdisc_running_key = &qdisc_running_key;		\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);

/* returns the headroom that the master device needs to take into account
 * when forwarding to this dev
 */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

static inline bool netdev_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (dev->dsa_ptr != NULL)
		return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
	return false;
}

/**
 * netdev_priv - access network device private data
 * @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
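
/* Example (illustrative sketch): private state is reserved at allocation
 * time and fetched with netdev_priv(); struct foo_priv is hypothetical.
 */
#if 0 /* example only */
struct foo_priv {
	bool link_up;
};

static struct net_device *foo_alloc(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
	struct foo_priv *priv;

	if (!dev)
		return NULL;
	priv = netdev_priv(dev);
	priv->link_up = false;
	return dev;
}
#endif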
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, it will cause a symlink to be created
 * during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value
 */
#define NAPI_POLL_WEIGHT 64
/**
 * netif_napi_add - initialize a NAPI context
 * @dev:  network device
 * @napi: NAPI context
 * @poll: polling function
 * @weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 * netif_tx_napi_add - initialize a NAPI context
 * @dev:  network device
 * @napi: NAPI context
 * @poll: polling function
 * @weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * This avoids adding it into napi_hash[], and thus polluting that hash table.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}

/**
 * netif_napi_del - remove a NAPI context
 * @napi: NAPI context
 *
 * netif_napi_del() removes a NAPI context from the network device NAPI list
 */
void netif_napi_del(struct napi_struct *napi);
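
/* Example (illustrative sketch): the usual NAPI pattern; foo_clean_rx() is
 * a hypothetical routine that processes up to 'budget' received packets.
 */
#if 0 /* example only */
static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable the device's RX interrupt here */
	}
	return work_done;
}

static void foo_setup_napi(struct net_device *dev, struct napi_struct *napi)
{
	netif_napi_add(dev, napi, foo_poll, NAPI_POLL_WEIGHT);
}
#endif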
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in tunnel GRO receive */
	u8	encap_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* Used in GRE, set in fou/gue_gro_receive */
	u8	is_fou:1;

	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

	/* Number of gro_receive callbacks this packet already went through */
	u8	recursion_counter:4;

	/* 1 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
						struct sk_buff **head,
						struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
					     struct sk_buff *);
static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
						   struct sock *sk,
						   struct sk_buff **head,
						   struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}
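
/* Example (illustrative sketch): an encapsulation handler recurses into the
 * inner protocol through call_gro_receive() so that GRO_RECURSION_LIMIT is
 * enforced; foo_lookup_inner_offload() is hypothetical.
 */
#if 0 /* example only */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype = foo_lookup_inner_offload(skb);

	if (!ptype)
		return NULL;
	/* bounded recursion into the inner protocol's handler */
	return call_gro_receive(ptype->callbacks.gro_receive, head, skb);
}
#endif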
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};
/* often modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
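
/* Example (illustrative sketch): allocate per-CPU stats at init time and
 * update them under the u64_stats syncp on the hot path; foo_*() names
 * are hypothetical.
 */
#if 0 /* example only */
static int foo_dev_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
#endif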
enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
};

struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this e.g. to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007	/* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017	/* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA	0x0018
#define NETDEV_BONDING_INFO	0x0019
#define NETDEV_PRECHANGEUPPER	0x001A
#define NETDEV_CHANGELOWERSTATE	0x001B
#define NETDEV_UDP_TUNNEL_PUSH_INFO	0x001C
#define NETDEV_CHANGE_TX_QUEUE_LEN	0x001E
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* is lower dev state */
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
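
/* Example (illustrative sketch): subsystems track device events by
 * registering a notifier block; foo_netdev_event() is hypothetical.
 */
#if 0 /* example only */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		/* device came up */
		break;
	case NETDEV_UNREGISTER:
		/* drop any state kept for dev */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_notifier = {
	.notifier_call = foo_netdev_event,
};
/* at module init: register_netdevice_notifier(&foo_notifier); */
#endif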
extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
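
/* Example (illustrative sketch): walking the per-namespace device list;
 * the plain iterators need RTNL or dev_base_lock, the _rcu variants an
 * RCU read-side critical section.
 */
#if 0 /* example only */
static void foo_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}
#endif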
int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
				      unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
DECLARE_PER_CPU(int, xmit_recursion);
#define XMIT_RECURSION_LIMIT	10

static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}
  2286. /* GRO checksum functions. These are logical equivalents of the normal
  2287. * checksum functions (in skbuff.h) except that they operate on the GRO
  2288. * offsets and fields in sk_buff.
  2289. */
  2290. __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
  2291. static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
  2292. {
  2293. return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
  2294. }
  2295. static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
  2296. bool zero_okay,
  2297. __sum16 check)
  2298. {
  2299. return ((skb->ip_summed != CHECKSUM_PARTIAL ||
  2300. skb_checksum_start_offset(skb) <
  2301. skb_gro_offset(skb)) &&
  2302. !skb_at_gro_remcsum_start(skb) &&
  2303. NAPI_GRO_CB(skb)->csum_cnt == 0 &&
  2304. (!zero_okay || check));
  2305. }
  2306. static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
  2307. __wsum psum)
  2308. {
  2309. if (NAPI_GRO_CB(skb)->csum_valid &&
  2310. !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
  2311. return 0;
  2312. NAPI_GRO_CB(skb)->csum = psum;
  2313. return __skb_gro_checksum_complete(skb);
  2314. }
  2315. static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
  2316. {
  2317. if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
  2318. /* Consume a checksum from CHECKSUM_UNNECESSARY */
  2319. NAPI_GRO_CB(skb)->csum_cnt--;
  2320. } else {
  2321. /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
  2322. * verified a new top level checksum or an encapsulated one
  2323. * during GRO. This saves work if we fallback to normal path.
  2324. */
  2325. __skb_incr_checksum_unnecessary(skb);
  2326. }
  2327. }
  2328. #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
  2329. compute_pseudo) \
  2330. ({ \
  2331. __sum16 __ret = 0; \
  2332. if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
  2333. __ret = __skb_gro_checksum_validate_complete(skb, \
  2334. compute_pseudo(skb, proto)); \
  2335. if (__ret) \
  2336. __skb_mark_checksum_bad(skb); \
  2337. else \
  2338. skb_gro_incr_csum_unnecessary(skb); \
  2339. __ret; \
  2340. })
  2341. #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
  2342. __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
  2343. #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
  2344. compute_pseudo) \
  2345. __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
  2346. #define skb_gro_checksum_simple_validate(skb) \
  2347. __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
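
/*
 * Example (illustrative sketch): an L4 gro_receive handler validates the
 * packet checksum before attempting to coalesce, flushing instead of
 * merging when validation fails. Modeled on how TCP over IPv4 uses this
 * helper; inet_gro_compute_pseudo computes the pseudo-header sum for the
 * current GRO offsets.
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;	// bad checksum: don't merge
 *		return NULL;
 *	}
 */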
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
        return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
                !NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
                                              __sum16 check, __wsum pseudo)
{
        NAPI_GRO_CB(skb)->csum = ~pseudo;
        NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
do {                                                                    \
        if (__skb_gro_checksum_convert_check(skb))                      \
                __skb_gro_checksum_convert(skb, check,                  \
                                           compute_pseudo(skb, proto)); \
} while (0)

struct gro_remcsum {
        int offset;
        __wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
        grc->offset = 0;
        grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
                                            unsigned int off, size_t hdrlen,
                                            int start, int offset,
                                            struct gro_remcsum *grc,
                                            bool nopartial)
{
        __wsum delta;
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

        if (!nopartial) {
                NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
                return ptr;
        }

        ptr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, off + plen)) {
                ptr = skb_gro_header_slow(skb, off + plen, off);
                if (!ptr)
                        return NULL;
        }

        delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
                               start, offset);

        /* Adjust skb->csum since we changed the packet */
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

        grc->offset = off + hdrlen + offset;
        grc->delta = delta;

        return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
                                           struct gro_remcsum *grc)
{
        void *ptr;
        size_t plen = grc->offset + sizeof(u16);

        if (!grc->delta)
                return;

        ptr = skb_gro_header_fast(skb, grc->offset);
        if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
                ptr = skb_gro_header_slow(skb, plen, grc->offset);
                if (!ptr)
                        return;
        }

        remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
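
/*
 * Example (illustrative sketch): a tunnel gro_receive handler supporting
 * remote checksum offload brackets its header processing with the
 * init/process/cleanup triple. Loosely modeled on the vxlan GRO path;
 * the hdrlen/start/offset values come from the tunnel's own header
 * layout and are placeholders here.
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	...
 *	ptr = skb_gro_remcsum_process(skb, ptr, off, hdrlen,
 *				      start, offset, &grc, nopartial);
 *	if (!ptr)
 *		goto out;	// headers could not be pulled
 *	...
 *	skb_gro_remcsum_cleanup(skb, &grc);	// restore the csum field
 */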
struct skb_csum_offl_spec {
        __u16 ipv4_okay:1,
              ipv6_okay:1,
              encap_okay:1,
              ip_options_okay:1,
              ext_hdrs_okay:1,
              tcp_okay:1,
              udp_okay:1,
              sctp_okay:1,
              vlan_okay:1,
              no_encapped_ipv6:1,
              no_not_encapped:1;
};

bool __skb_csum_offload_chk(struct sk_buff *skb,
                            const struct skb_csum_offl_spec *spec,
                            bool *csum_encapped,
                            bool csum_help);

static inline bool skb_csum_offload_chk(struct sk_buff *skb,
                                        const struct skb_csum_offl_spec *spec,
                                        bool *csum_encapped,
                                        bool csum_help)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return false;

        return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
}

static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
                                             const struct skb_csum_offl_spec *spec)
{
        bool csum_encapped;

        return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
}

static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
{
        static const struct skb_csum_offl_spec csum_offl_spec = {
                .ipv4_okay = 1,
                .ip_options_okay = 1,
                .ipv6_okay = 1,
                .vlan_okay = 1,
                .tcp_okay = 1,
                .udp_okay = 1,
        };

        return skb_csum_offload_chk_help(skb, &csum_offl_spec);
}

static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
{
        static const struct skb_csum_offl_spec csum_offl_spec = {
                .ipv4_okay = 1,
                .ip_options_okay = 1,
                .tcp_okay = 1,
                .udp_okay = 1,
                .vlan_okay = 1,
        };

        return skb_csum_offload_chk_help(skb, &csum_offl_spec);
}
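
/*
 * Example (illustrative sketch, based only on the declarations above): a
 * driver whose hardware checksums TCP/UDP over plain IPv4 can describe
 * that capability in a spec and call the _help variant from its xmit
 * path. The spec name below is hypothetical.
 *
 *	static const struct skb_csum_offl_spec mydrv_csum_spec = {
 *		.ipv4_okay	= 1,
 *		.tcp_okay	= 1,
 *		.udp_okay	= 1,
 *	};
 *
 *	if (skb_csum_offload_chk_help(skb, &mydrv_csum_spec)) {
 *		// checksum matches the spec: hand it to the hardware
 *	} else {
 *		// not offloadable; csum_help resolved it in software
 *	}
 */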
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned int len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;

        return dev->header_ops->parse(skb, haddr);
}

/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
                                       char *ll_header, int len)
{
        if (likely(len >= dev->hard_header_len))
                return true;

        if (len < dev->min_header_len)
                return false;

        if (capable(CAP_SYS_RAWIO)) {
                memset(ll_header + len, 0, dev->hard_header_len - len);
                return true;
        }

        if (dev->header_ops && dev->header_ops->validate)
                return dev->header_ops->validate(ll_header, len);

        return false;
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr,
                           int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);

static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY      (1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
        u64                     count;
        unsigned int            num_buckets;
        unsigned int            history_head;
        u16                     history[FLOW_LIMIT_HISTORY];
        u8                      buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */

/*
 * Incoming packets are placed on per-CPU queues
 */
struct softnet_data {
        struct list_head        poll_list;
        struct sk_buff_head     process_queue;

        /* stats */
        unsigned int            processed;
        unsigned int            time_squeeze;
        unsigned int            received_rps;
#ifdef CONFIG_RPS
        struct softnet_data     *rps_ipi_list;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
        struct sd_flow_limit __rcu *flow_limit;
#endif
        struct Qdisc            *output_queue;
        struct Qdisc            **output_queue_tailp;
        struct sk_buff          *completion_queue;

#ifdef CONFIG_RPS
        /* input_queue_head should be written by cpu owning this struct,
         * and only read by other cpus. Worth using a cache line.
         */
        unsigned int            input_queue_head ____cacheline_aligned_in_smp;

        /* Elements below can be accessed between CPUs for RPS/RFS */
        struct call_single_data csd ____cacheline_aligned_in_smp;
        struct softnet_data     *rps_ipi_next;
        unsigned int            cpu;
        unsigned int            input_queue_tail;
#endif
        unsigned int            dropped;
        struct sk_buff_head     input_pkt_queue;
        struct napi_struct      backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
                                              unsigned int *qtail)
{
#ifdef CONFIG_RPS
        *qtail = ++sd->input_queue_tail;
#endif
}
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
        clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_start_queue(txq);
        }
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
        netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_wake_queue(txq);
        }
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop transmit queue
 * @dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
 * netif_queue_stopped - test if transmit queue is flow blocked
 * @dev: network device
 *
 * Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
        return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
        return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
        return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
        prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
        prefetchw(&dev_queue->dql.limit);
#endif
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
                                        unsigned int bytes)
{
#ifdef CONFIG_BQL
        dql_queued(&dev_queue->dql, bytes);

        if (likely(dql_avail(&dev_queue->dql) >= 0))
                return;

        set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

        /*
         * The XOFF flag must be set before checking the dql_avail below,
         * because in netdev_tx_completed_queue we update the dql_completed
         * before checking the XOFF flag.
         */
        smp_mb();

        /* check again in case another CPU has just made room avail */
        if (unlikely(dql_avail(&dev_queue->dql) >= 0))
                clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 * netdev_sent_queue - report the number of bytes queued to hardware
 * @dev: network device
 * @bytes: number of bytes queued to the hardware device queue
 *
 * Report the number of bytes queued for sending/completion to the network
 * device hardware queue. @bytes should be a good approximation and should
 * exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
                                             unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
        if (unlikely(!bytes))
                return;

        dql_completed(&dev_queue->dql, bytes);

        /*
         * Without the memory barrier there is a small possibility that
         * netdev_tx_sent_queue will miss the update and cause the queue to
         * be stopped forever
         */
        smp_mb();

        if (dql_avail(&dev_queue->dql) < 0)
                return;

        if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
                netif_schedule_queue(dev_queue);
#endif
}

/**
 * netdev_completed_queue - report bytes and packets completed by device
 * @dev: network device
 * @pkts: actual number of packets sent over the medium
 * @bytes: actual number of bytes sent over the medium
 *
 * Report the number of bytes and packets transmitted by the network device
 * hardware queue over the physical medium, @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
                                          unsigned int pkts, unsigned int bytes)
{
        netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
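
/*
 * Example (illustrative sketch): a BQL-aware single-queue driver pairs
 * netdev_sent_queue() in its xmit path with netdev_completed_queue() in
 * its TX completion handler, so the stack can bound the bytes in flight.
 * The mydrv_* context is hypothetical.
 *
 *	// in mydrv_start_xmit(), after posting skb to the hardware ring:
 *	netdev_sent_queue(dev, skb->len);
 *
 *	// in TX completion, after reclaiming descriptors:
 *	netdev_completed_queue(dev, pkts_done, bytes_done);
 *
 *	// on ring reset, the accounting must be reset as well:
 *	netdev_reset_queue(dev);
 */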
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
        clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
        dql_reset(&q->dql);
#endif
}

/**
 * netdev_reset_queue - reset the packets and bytes count of a network device
 * @dev_queue: network device
 *
 * Reset the bytes and packet count of a network device and clear the
 * software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
        netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * @dev: network device
 * @queue_index: given tx queue index
 *
 * Returns 0 if given tx queue index >= number of device tx queues,
 * otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
                net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
                                     dev->name, queue_index,
                                     dev->real_num_tx_queues);
                return 0;
        }

        return queue_index;
}

/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device. We only need start,
 * stop, and a check if it's stopped. All other device management is
 * done at the overall netdevice level.
 * There is also a test for whether the device is multiqueue.
 */

/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        netif_tx_start_queue(txq);
}

/**
 * netif_stop_subqueue - stop sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
        netif_tx_stop_queue(txq);
}

/**
 * netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
                                            u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
                                          struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
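
/*
 * Example (illustrative sketch): a multiqueue driver applies backpressure
 * per TX ring, stopping its own subqueue when the ring fills and waking
 * it from the completion path once descriptors are reclaimed. The
 * mydrv_* ring accounting is hypothetical.
 *
 *	// in mydrv_start_xmit(), queue index taken from the skb:
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (mydrv_ring_full(ring))
 *		netif_stop_subqueue(dev, q);
 *
 *	// in TX completion, once there is room again:
 *	if (__netif_subqueue_stopped(dev, q) && mydrv_ring_has_room(ring))
 *		netif_wake_subqueue(dev, q);
 */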
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
                        u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
                                      const struct cpumask *mask,
                                      u16 index)
{
        return 0;
}
#endif

u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues);

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
                              struct sk_buff *skb)
{
        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
        return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                               unsigned int rxq)
{
        return 0;
}
#endif

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
                struct netdev_rx_queue *queue)
{
        struct net_device *dev = queue->dev;
        int index = queue - dev->_rx;

        BUG_ON(index >= dev->num_rx_queues);
        return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES      (8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
        SKB_REASON_CONSUMED,
        SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in the following contexts:
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 * replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 * Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 * replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 * and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
        __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
        __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
        __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
        __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
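
/*
 * Example (illustrative sketch): a TX completion handler running in hard
 * irq context frees successfully transmitted skbs with the _irq variant
 * (a consume, not a drop), while cleanup paths whose context is unknown
 * use the _any variants.
 *
 *	// irq context, packet was sent fine (not an error drop):
 *	dev_consume_skb_irq(skb);
 *
 *	// shutdown/cleanup path callable from any context:
 *	dev_kfree_skb_any(skb);
 */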
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
        kfree_skb(napi->skb);
        napi->skb = NULL;
}
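
/*
 * Example (illustrative sketch): a driver's NAPI poll loop feeding GRO.
 * Linear-buffer devices call napi_gro_receive() per packet; page-based
 * devices can instead fill the skb returned by napi_get_frags() and
 * hand it back with napi_gro_frags(). mydrv_fill_frags() is hypothetical.
 *
 *	// per received frame, inside mydrv_poll(napi, budget):
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		break;		// out of memory, retry on next poll
 *	mydrv_fill_frags(skb, rx_desc);	// attach pages, set skb->len
 *	napi_gro_frags(napi);
 */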
bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
                        unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
                           char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_xdp_fd(struct net_device *dev, int fd);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
                        const struct sk_buff *skb);

static __always_inline int ____dev_forward_skb(struct net_device *dev,
                                               struct sk_buff *skb)
{
        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
            unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        skb_scrub_packet(skb, true);
        skb->priority = 0;
        return 0;
}

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        this_cpu_inc(*dev->pcpu_refcnt);
}
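
/*
 * Example (illustrative sketch): code that stashes a net_device pointer
 * beyond its current locking scope must pin the device with dev_hold()
 * and drop the reference with dev_put() when done, e.g. around deferred
 * work (priv->work is a hypothetical work item):
 *
 *	dev_hold(dev);			// pin while the work is queued
 *	schedule_work(&priv->work);
 *	...
 *	// in the work handler, once finished with the device:
 *	dev_put(dev);
 */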
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);

/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if the device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
enum {
        NETIF_MSG_DRV           = 0x0001,
        NETIF_MSG_PROBE         = 0x0002,
        NETIF_MSG_LINK          = 0x0004,
        NETIF_MSG_TIMER         = 0x0008,
        NETIF_MSG_IFDOWN        = 0x0010,
        NETIF_MSG_IFUP          = 0x0020,
        NETIF_MSG_RX_ERR        = 0x0040,
        NETIF_MSG_TX_ERR        = 0x0080,
        NETIF_MSG_TX_QUEUED     = 0x0100,
        NETIF_MSG_INTR          = 0x0200,
        NETIF_MSG_TX_DONE       = 0x0400,
        NETIF_MSG_RX_STATUS     = 0x0800,
        NETIF_MSG_PKTDATA       = 0x1000,
        NETIF_MSG_HW            = 0x2000,
        NETIF_MSG_WOL           = 0x4000,
};

#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        /* use default */
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)   /* no output */
                return 0;
        /* set low N bits */
        return (1 << debug_value) - 1;
}
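
/*
 * Example (illustrative sketch): the conventional driver pattern is a
 * "debug" module parameter translated into msg_enable bits at probe
 * time, then tested via the netif_msg_*() macros before logging.
 *
 *	static int debug = -1;		// -1 means use the defaults below
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */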
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
        spin_lock(&txq->_xmit_lock);
        txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
        spin_lock_bh(&txq->_xmit_lock);
        txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
        bool ok = spin_trylock(&txq->_xmit_lock);

        if (likely(ok))
                txq->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
        if (txq->xmit_lock_owner != -1)
                txq->trans_start = jiffies;
}

/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
static inline void netif_trans_update(struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (txq->trans_start != jiffies)
                txq->trans_start = jiffies;
}

/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        spin_lock(&dev->tx_global_lock);
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* We are the only thread of execution doing a
                 * freeze, but we have to grab the _xmit_lock in
                 * order to synchronize with threads which are in
                 * the ->hard_start_xmit() handler and already
                 * checked the frozen bit.
                 */
                __netif_tx_lock(txq, cpu);
                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
                __netif_tx_unlock(txq);
        }
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
        local_bh_disable();
        netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* No need to grab the _xmit_lock here. If the
                 * queue is not stopped for another reason, we
                 * force a schedule.
                 */
                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
                netif_schedule_queue(txq);
        }
        spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        netif_tx_unlock(dev);
        local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {                   \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(txq, cpu);              \
        }                                               \
}

#define HARD_TX_TRYLOCK(dev, txq)                       \
        (((dev->features & NETIF_F_LLTX) == 0) ?        \
                __netif_tx_trylock(txq) :               \
                true)

#define HARD_TX_UNLOCK(dev, txq) {                      \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
        }                                               \
}

static inline void netif_tx_disable(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                __netif_tx_lock(txq, cpu);
                netif_tx_stop_queue(txq);
                __netif_tx_unlock(txq);
        }
        local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
        spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
        int subclass = SINGLE_DEPTH_NESTING;

        if (dev->netdev_ops->ndo_get_lock_subclass)
                subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

        spin_lock_nested(&dev->addr_list_lock, subclass);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
        spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
        spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
        list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                    unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
                         count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
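
/*
 * Example (illustrative sketch): a minimal loadable driver allocates an
 * Ethernet-style device with a private area, registers it, and tears it
 * down in reverse order. struct mydrv_priv and "myeth%d" are hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev(sizeof(struct mydrv_priv), "myeth%d",
 *			   NET_NAME_UNKNOWN, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return -ENODEV;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */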
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
                       struct net_device *dev,
                       int (*sync)(struct net_device *, const unsigned char *),
                       int (*unsync)(struct net_device *,
                                     const unsigned char *));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
                          struct net_device *dev,
                          int (*unsync)(struct net_device *,
                                        const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 * __dev_uc_sync - Synchronize device's unicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
                                int (*sync)(struct net_device *,
                                            const unsigned char *),
                                int (*unsync)(struct net_device *,
                                              const unsigned char *))
{
        return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 * __dev_uc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
                                   int (*unsync)(struct net_device *,
                                                 const unsigned char *))
{
        __hw_addr_unsync_dev(&dev->uc, dev, unsync);
}

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 * __dev_mc_sync - Synchronize device's multicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
                                int (*sync)(struct net_device *,
                                            const unsigned char *),
                                int (*unsync)(struct net_device *,
                                              const unsigned char *))
{
        return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 * __dev_mc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
                                   int (*unsync)(struct net_device *,
                                                 const unsigned char *))
{
        __hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
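
/*
 * Example (illustrative sketch): a driver's ndo_set_rx_mode() keeps its
 * hardware filter tables in step with the kernel's address lists by
 * passing per-address add/remove callbacks. The mydrv_* callbacks are
 * hypothetical and would program the device's filters.
 *
 *	static void mydrv_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, mydrv_uc_add, mydrv_uc_del);
 *		__dev_mc_sync(dev, mydrv_mc_add, mydrv_mc_del);
 *	}
 *
 * On teardown, __dev_uc_unsync()/__dev_mc_unsync() remove everything the
 * sync calls added.
 */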
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);

/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);

struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats);

extern int              netdev_max_backlog;
extern int              netdev_tstamp_prequeue;
extern int              weight_p;

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
                                                 struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->adj_list.upper, \
             updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
             updev; \
             updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
             updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
             updev; \
             updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))

bool netdev_has_any_upper_dev(struct net_device *dev);

void *netdev_lower_get_next_private(struct net_device *dev,
                                    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
                                        struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
        for (iter = (dev)->adj_list.lower.next, \
             priv = netdev_lower_get_next_private(dev, &(iter)); \
             priv; \
             priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
        for (iter = &(dev)->adj_list.lower, \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
                            struct list_head **iter);

#define netdev_for_each_lower_dev(dev, ldev, iter) \
        for (iter = (dev)->adj_list.lower.next, \
             ldev = netdev_lower_get_next(dev, &(iter)); \
             ldev; \
             ldev = netdev_lower_get_next(dev, &(iter)))

struct net_device *netdev_all_lower_get_next(struct net_device *dev,
                                             struct list_head **iter);
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
                                                 struct list_head **iter);

#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
        for (iter = (dev)->all_adj_list.lower.next, \
             ldev = netdev_all_lower_get_next(dev, &(iter)); \
             ldev; \
             ldev = netdev_all_lower_get_next(dev, &(iter)))

#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
        for (iter = &(dev)->all_adj_list.lower, \
             ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
             ldev; \
             ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
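
/*
 * Example (illustrative sketch): walking the adjacency lists of a
 * stacked device. The _rcu iterators above must run inside an RCU
 * read-side section:
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
 *		// e.g. propagate an event to each upper device
 *	}
 *	rcu_read_unlock();
 */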
  3491. void *netdev_adjacent_get_private(struct list_head *adj_list);
  3492. void *netdev_lower_get_first_private_rcu(struct net_device *dev);
  3493. struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
  3494. struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
  3495. int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
  3496. int netdev_master_upper_dev_link(struct net_device *dev,
  3497. struct net_device *upper_dev,
  3498. void *upper_priv, void *upper_info);
  3499. void netdev_upper_dev_unlink(struct net_device *dev,
  3500. struct net_device *upper_dev);
  3501. void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
  3502. void *netdev_lower_dev_get_private(struct net_device *dev,
  3503. struct net_device *lower_dev);
  3504. void netdev_lower_state_changed(struct net_device *lower_dev,
  3505. void *lower_state_info);
  3506. int netdev_default_l2upper_neigh_construct(struct net_device *dev,
  3507. struct neighbour *n);
  3508. void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
  3509. struct neighbour *n);
  3510. /* RSS keys are 40 or 52 bytes long */
  3511. #define NETDEV_RSS_KEY_LEN 52
  3512. extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
  3513. void netdev_rss_key_fill(void *buffer, size_t len);
  3514. int dev_get_nest_level(struct net_device *dev);
  3515. int skb_checksum_help(struct sk_buff *skb);
  3516. struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
  3517. netdev_features_t features, bool tx_path);
  3518. struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
  3519. netdev_features_t features);
  3520. struct netdev_bonding_info {
  3521. ifslave slave;
  3522. ifbond master;
  3523. };
  3524. struct netdev_notifier_bonding_info {
  3525. struct netdev_notifier_info info; /* must be first */
  3526. struct netdev_bonding_info bonding_info;
  3527. };
  3528. void netdev_bonding_info_change(struct net_device *dev,
  3529. struct netdev_bonding_info *bonding_info);
  3530. static inline
  3531. struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
  3532. {
  3533. return __skb_gso_segment(skb, features, true);
  3534. }
  3535. __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
  3536. static inline bool can_checksum_protocol(netdev_features_t features,
  3537. __be16 protocol)
  3538. {
  3539. if (protocol == htons(ETH_P_FCOE))
  3540. return !!(features & NETIF_F_FCOE_CRC);
  3541. /* Assume this is an IP checksum (not SCTP CRC) */
  3542. if (features & NETIF_F_HW_CSUM) {
  3543. /* Can checksum everything */
  3544. return true;
  3545. }
  3546. switch (protocol) {
  3547. case htons(ETH_P_IP):
  3548. return !!(features & NETIF_F_IP_CSUM);
  3549. case htons(ETH_P_IPV6):
  3550. return !!(features & NETIF_F_IPV6_CSUM);
  3551. default:
  3552. return false;
  3553. }
  3554. }
  3555. /* Map an ethertype into IP protocol if possible */
  3556. static inline int eproto_to_ipproto(int eproto)
  3557. {
  3558. switch (eproto) {
  3559. case htons(ETH_P_IP):
  3560. return IPPROTO_IP;
  3561. case htons(ETH_P_IPV6):
  3562. return IPPROTO_IPV6;
  3563. default:
  3564. return -1;
  3565. }
  3566. }
  3567. #ifdef CONFIG_BUG
  3568. void netdev_rx_csum_fault(struct net_device *dev);
  3569. #else
  3570. static inline void netdev_rx_csum_fault(struct net_device *dev)
  3571. {
  3572. }
  3573. #endif
  3574. /* rx skb timestamps */
  3575. void net_enable_timestamp(void);
  3576. void net_disable_timestamp(void);
  3577. #ifdef CONFIG_PROC_FS
  3578. int __init dev_proc_init(void);
  3579. #else
  3580. #define dev_proc_init() 0
  3581. #endif
  3582. static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
  3583. struct sk_buff *skb, struct net_device *dev,
  3584. bool more)
  3585. {
  3586. skb->xmit_more = more ? 1 : 0;
  3587. return ops->ndo_start_xmit(skb, dev);
  3588. }
  3589. static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
  3590. struct netdev_queue *txq, bool more)
  3591. {
  3592. const struct net_device_ops *ops = dev->netdev_ops;
  3593. int rc;
  3594. rc = __netdev_start_xmit(ops, skb, dev, more);
  3595. if (rc == NETDEV_TX_OK)
  3596. txq_trans_update(txq);
  3597. return rc;
  3598. }
  3599. int netdev_class_create_file_ns(struct class_attribute *class_attr,
  3600. const void *ns);
  3601. void netdev_class_remove_file_ns(struct class_attribute *class_attr,
  3602. const void *ns);
  3603. static inline int netdev_class_create_file(struct class_attribute *class_attr)
  3604. {
  3605. return netdev_class_create_file_ns(class_attr, NULL);
  3606. }
  3607. static inline void netdev_class_remove_file(struct class_attribute *class_attr)
  3608. {
  3609. netdev_class_remove_file_ns(class_attr, NULL);
  3610. }
  3611. extern struct kobj_ns_type_operations net_ns_type_operations;
  3612. const char *netdev_drivername(const struct net_device *dev);
  3613. void linkwatch_run_queue(void);
  3614. static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
  3615. netdev_features_t f2)
  3616. {
  3617. if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
  3618. if (f1 & NETIF_F_HW_CSUM)
  3619. f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  3620. else
  3621. f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  3622. }
  3623. return f1 & f2;
  3624. }
  3625. static inline netdev_features_t netdev_get_wanted_features(
  3626. struct net_device *dev)
  3627. {
  3628. return (dev->features & ~dev->hw_features) | dev->wanted_features;
  3629. }
  3630. netdev_features_t netdev_increment_features(netdev_features_t all,
  3631. netdev_features_t one, netdev_features_t mask);
  3632. /* Allow TSO being used on stacked device :
  3633. * Performing the GSO segmentation before last device
  3634. * is a performance improvement.
  3635. */
  3636. static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
  3637. netdev_features_t mask)
  3638. {
  3639. return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
  3640. }
  3641. int __netdev_update_features(struct net_device *dev);
  3642. void netdev_update_features(struct net_device *dev);
  3643. void netdev_change_features(struct net_device *dev);
  3644. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  3645. struct net_device *dev);
  3646. netdev_features_t passthru_features_check(struct sk_buff *skb,
  3647. struct net_device *dev,
  3648. netdev_features_t features);
  3649. netdev_features_t netif_skb_features(struct sk_buff *skb);
  3650. static inline bool net_gso_ok(netdev_features_t features, int gso_type)
  3651. {
  3652. netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
  3653. /* check flags correspondence */
  3654. BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
  3655. BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
  3656. BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
  3657. BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
  3658. BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
  3659. BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
  3660. BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
  3661. BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
  3662. BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
  3663. BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
  3664. BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
  3665. BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
  3666. BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
  3667. BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
  3668. BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
  3669. BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
  3670. return (features & feature) == feature;
  3671. }
  3672. static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
  3673. {
  3674. return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
  3675. (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
  3676. }
  3677. static inline bool netif_needs_gso(struct sk_buff *skb,
  3678. netdev_features_t features)
  3679. {
  3680. return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
  3681. unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
  3682. (skb->ip_summed != CHECKSUM_UNNECESSARY)));
  3683. }
  3684. static inline void netif_set_gso_max_size(struct net_device *dev,
  3685. unsigned int size)
  3686. {
  3687. dev->gso_max_size = size;
  3688. }
  3689. static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
  3690. int pulled_hlen, u16 mac_offset,
  3691. int mac_len)
  3692. {
  3693. skb->protocol = protocol;
  3694. skb->encapsulation = 1;
  3695. skb_push(skb, pulled_hlen);
  3696. skb_reset_transport_header(skb);
  3697. skb->mac_header = mac_offset;
  3698. skb->network_header = skb->mac_header + mac_len;
  3699. skb->mac_len = mac_len;
  3700. }
static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_ipvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_SLAVE;
}

static inline bool netif_is_ipvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_MASTER;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}
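
/*
 * Usage sketch (illustrative, not part of the original header): these
 * IFF_* predicates let core code branch on device type without knowing
 * about individual drivers, e.g. refusing to stack on a device that is
 * already claimed by a bridge or LAG upper device:
 */
#if 0	/* example only, never compiled */
	if (netif_is_lag_port(dev) || netif_is_bridge_port(dev))
		return -EBUSY;	/* already enslaved to an upper device */
#endif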
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
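
/*
 * Usage sketch (illustrative, not part of the original header): tunnel
 * drivers call netif_keep_dst() from their setup routine because their
 * ndo_start_xmit() still dereferences skb_dst(), e.g. for PMTU handling.
 * The function and ops names below are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &example_tunnel_netdev_ops;
	netif_keep_dst(dev);	/* xmit path reads skb_dst() */
}
#endif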
/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return dev->priv_flags & IFF_MACSEC;
}
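
/*
 * Usage sketch (illustrative, not part of the original header): the
 * 802.1Q code uses this predicate to cap a VLAN device's MTU so the
 * 4-byte tag still fits within the lower device's own MTU:
 */
#if 0	/* example only, never compiled */
	int max_mtu = netif_reduces_vlan_mtu(real_dev) ?
		      real_dev->mtu - VLAN_HLEN : real_dev->mtu;
#endif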
extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4)
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3)
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
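
/*
 * Usage note (illustrative, not part of the original header): a driver
 * that owns a fixed interface name declares the alias so its module is
 * auto-loaded when that name is first requested, as the PPP core does:
 */
#if 0	/* example only, never compiled */
MODULE_ALIAS_NETDEV("ppp");	/* expands to MODULE_ALIAS("netdev-ppp") */
#endif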
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif
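
/*
 * Usage sketch (illustrative, not part of the original header):
 * netdev_dbg() is used like dev_dbg() but prefixes the device name and
 * registration state; with neither DEBUG nor dynamic debug enabled it
 * compiles away while still type-checking its arguments.
 */
#if 0	/* example only, never compiled */
	netdev_dbg(dev, "queue %d restarted, %u descriptors free\n",
		   queue, free_descs);
#endif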
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)
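
/*
 * Usage sketch (illustrative, not part of the original header):
 * netdev_WARN() suits "should never happen" driver states where a
 * backtrace is more useful than a plain log line:
 */
#if 0	/* example only, never compiled */
	if (unlikely(!tx_buf->skb))
		netdev_WARN(dev, "TX completion for empty slot %u\n", idx);
#endif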
/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
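
/*
 * Usage sketch (illustrative, not part of the original header): unlike
 * the plain netdev_*() helpers, the netif_*() variants are additionally
 * gated on the driver's ethtool msg_enable bits (via netif_msg_##type),
 * so message classes can be toggled at run time.  The struct and field
 * names below are conventional, not mandated.
 */
#if 0	/* example only, never compiled */
struct example_priv {
	u32 msg_enable;		/* set via ethtool msglvl */
};

static void example_link_change(struct net_device *dev, bool up)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_info(priv, link, dev, "link %s\n", up ? "up" : "down");
}
#endif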
/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16. Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *        sure which should go first, but I bet it won't make much
 *        difference if we are running VLANs.  The good news is that
 *        this protocol won't be in the list unless compiled in, so
 *        the average user (w/out VLANs) will not be adversely affected.
 *        --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
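
/*
 * Usage sketch (illustrative, not part of the original header):
 * net/core/dev.c indexes its ptype_base[] lists with the low nibble of
 * the protocol value in host byte order, exactly as described above:
 */
#if 0	/* example only, never compiled */
	struct list_head *head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
#endif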
#endif	/* _LINUX_NETDEVICE_H */