netdevice.h
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;

struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
struct xdp_buff;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
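/*
 * Example (illustrative sketch, not part of this header): protocol output
 * paths commonly wrap dev_queue_xmit() in net_xmit_eval() so that a
 * congestion notification (NET_XMIT_CN) is not reported to callers as a
 * packet loss:
 *
 *	static int example_output(struct sk_buff *skb)
 *	{
 *		return net_xmit_eval(dev_queue_xmit(skb));
 *	}
 */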
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
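/*
 * Example (illustrative sketch): a caller that hands an skb to another
 * device's transmit path can use dev_xmit_complete() to learn whether the
 * skb was consumed (and must not be touched again) or is still owned by
 * the caller and may be requeued or freed. Roughly:
 *
 *	rc = example_xmit(skb, dev);	(hypothetical transmit call)
 *	if (dev_xmit_complete(rc))
 *		return rc;		(skb is no longer ours)
 *	kfree_skb(skb);			(still ours; drop it here)
 */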
/*
 *	Compute the worst-case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
extern struct static_key rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
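/*
 * Example (illustrative sketch): a driver's ndo_set_rx_mode() walking the
 * device's multicast list to program hardware filters. The
 * example_hw_add_mc_filter() helper is hypothetical.
 *
 *	static void example_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			example_hw_add_mc_filter(dev, ha->addr);
 *	}
 */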
struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
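/*
 * Worked example (illustrative): for plain Ethernet, hard_header_len is 14
 * and needed_headroom is typically 0, so LL_RESERVED_SPACE(dev) evaluates
 * to ((14 & ~15) + 16) = 16 bytes. A typical allocation pattern reserves
 * that headroom before link-layer headers are pushed:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */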
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a napi */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
};

enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
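/*
 * Example (illustrative sketch): the skeleton of an rx_handler. The
 * example_* helpers are hypothetical; a real handler (bridge, bonding,
 * macvlan, ...) decides per skb whether to pass, consume, or redirect.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!example_wants_frame(skb))
 *			return RX_HANDLER_PASS;
 *
 *		example_deliver(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 * Registered under RTNL with
 * netdev_rx_handler_register(dev, example_handle_frame, NULL) and removed
 * with netdev_rx_handler_unregister(dev).
 */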
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
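/*
 * Example (illustrative sketch): the canonical NAPI pattern. The interrupt
 * handler masks device interrupts and schedules the poll; the poll routine
 * processes up to "budget" packets and re-enables interrupts only once it
 * reports completion. Helpers prefixed example_ are hypothetical.
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		example_mask_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv =
 *			container_of(napi, struct example_priv, napi);
 *		int done = example_rx_clean(priv, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			example_unmask_irqs(priv);
 *		return done;
 *	}
 */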
/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: NAPI context
 *
 * Warning: caller must observe RCU grace period
 * before freeing memory containing @napi, if
 * this function returns true.
 * Note: core networking stack automatically calls it
 * from netif_napi_del().
 * Drivers might want to call this helper to combine all
 * the needed RCU grace periods into a single one.
 */
bool napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
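/*
 * Example (illustrative sketch): a driver cooperating with the stack via
 * the DRV_XOFF bit. When its TX ring fills it stops the queue; the TX
 * completion path wakes it once descriptors are reclaimed. The example_*
 * helpers are hypothetical.
 *
 *	In ndo_start_xmit():
 *		if (example_ring_full(priv))
 *			netif_stop_queue(dev);
 *
 *	In the TX completion handler:
 *		if (netif_queue_stopped(dev) && example_ring_has_room(priv))
 *			netif_wake_queue(dev);
 */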
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;

static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return net == &init_net ||
	       !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net;
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}
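/*
 * Example (illustrative sketch): how a flow gets recorded. The socket
 * layer does roughly the following at recvmsg() time, under RCU, with the
 * socket's receive hash:
 *
 *	rcu_read_lock();
 *	rps_record_sock_flow(rcu_dereference(rps_sock_flow_table), hash);
 *	rcu_read_unlock();
 *
 * rps_record_sock_flow() tolerates a NULL table, so no check is needed.
 */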
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* Check if a bpf program is set on the device. The callee should
	 * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
	 * is equivalent to XDP_ATTACHED_DRV.
	 */
	XDP_QUERY_PROG,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_VERIFIER_PREP,
	BPF_OFFLOAD_TRANSLATE,
	BPF_OFFLOAD_DESTROY,
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* XDP_QUERY_PROG */
		struct {
			u8 prog_attached;
			u32 prog_id;
			/* flags with which program was installed */
			u32 prog_flags;
		};
		/* BPF_OFFLOAD_VERIFIER_PREP */
		struct {
			struct bpf_prog *prog;
			const struct bpf_prog_offload_ops *ops; /* callee set */
		} verifier;
		/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
		struct {
			struct bpf_prog *prog;
		} offload;
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
	};
};
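/*
 * Example (illustrative sketch): the shape of a driver's ndo_bpf()
 * callback dispatching on the command. Helpers prefixed example_ are
 * hypothetical.
 *
 *	static int example_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return example_setup_xdp(dev, bpf->prog, bpf->extack);
 *		case XDP_QUERY_PROG:
 *			bpf->prog_attached = !!example_xdp_prog(dev);
 *			bpf->prog_id = example_xdp_prog_id(dev);
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */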
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tls_crypto_info;
struct tls_context;

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when the device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by the core transmit path to determine if the device is capable
 *	of performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	"not supported" error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transmission Unit
 *	(MTU) of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *	void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *			  u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *      Enable or disable the VF's ability to query its RSS Redirection Table
 *      and Hash Key. This is needed since, on some devices, VFs share this
 *      information with the PF and querying it may introduce a theoretical
 *      security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *		       void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fibre Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with the Fibre Channel management service as per
 *	the FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World
 *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *	(WWNN) to the FCoE protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 *      Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. The passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (e.g.
 *	network cables) or protocol-dependent mechanisms (e.g.
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get the ID of the physical port of this device. If the
 *	driver does not implement this, it is assumed that the hardware is
 *	not able to have multiple net devices on a single physical port.
 *
 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 *			      struct udp_tunnel_info *ti);
 *	Called by UDP tunnel to notify a driver about the UDP port and socket
 *	address family that a UDP tunnel is listening to. It is called only
  1044. * when a new port starts listening. The operation is protected by the
  1045. * RTNL.
  1046. *
  1047. * void (*ndo_udp_tunnel_del)(struct net_device *dev,
  1048. * struct udp_tunnel_info *ti);
  1049. * Called by UDP tunnel to notify the driver about a UDP port and socket
  1050. * address family that the UDP tunnel is not listening to anymore. The
  1051. * operation is protected by the RTNL.
  1052. *
  1053. * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
  1054. * struct net_device *dev)
  1055. * Called by upper layer devices to accelerate switching or other
  1056. * station functionality into hardware. 'pdev is the lowerdev
  1057. * to use for the offload and 'dev' is the net device that will
  1058. * back the offload. Returns a pointer to the private structure
  1059. * the upper layer will maintain.
  1060. * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
  1061. * Called by upper layer device to delete the station created
  1062. * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
  1063. * the station and priv is the structure returned by the add
  1064. * operation.
  1065. * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  1066. * int queue_index, u32 maxrate);
  1067. * Called when a user wants to set a max-rate limitation of specific
  1068. * TX queue.
  1069. * int (*ndo_get_iflink)(const struct net_device *dev);
  1070. * Called to get the iflink value of this device.
  1071. * void (*ndo_change_proto_down)(struct net_device *dev,
  1072. * bool proto_down);
  1073. * This function is used to pass protocol port error state information
  1074. * to the switch driver. The switch driver can react to the proto_down
  1075. * by doing a phys down on the associated switch port.
  1076. * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
  1077. * This function is used to get egress tunnel information for given skb.
  1078. * This is useful for retrieving outer tunnel header parameters while
  1079. * sampling packet.
  1080. * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
  1081. * This function is used to specify the headroom that the skb must
  1082. * consider when allocation skb during packet reception. Setting
  1083. * appropriate rx headroom value allows avoiding skb head copy on
  1084. * forward. Setting a negative value resets the rx headroom to the
  1085. * default value.
  1086. * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
  1087. * This function is used to set or query state related to XDP on the
  1088. * netdevice and manage BPF offload. See definition of
  1089. * enum bpf_netdev_command for details.
  1090. * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_frame *xdp);
  1091. * This function is used to submit a XDP packet for transmit on a
  1092. * netdevice.
  1093. * void (*ndo_xdp_flush)(struct net_device *dev);
  1094. * This function is used to inform the driver to flush a particular
  1095. * xdp tx queue. Must be called on same CPU as xdp_xmit.
  1096. */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);
	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(struct net_device *dev,
							   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev,
						struct xdp_frame *xdp);
	void			(*ndo_xdp_flush)(struct net_device *dev);
};
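
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * a minimal driver-side net_device_ops table wiring in the three
 * callbacks virtually every driver provides. All foo_* names are
 * hypothetical; a real driver installs its own handlers.
 */
#if 0	/* example only, kept out of the build */
static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);		/* let the stack submit skbs */
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real driver would hand the skb to hardware here */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,
};
#endif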
/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set
 * internally by drivers and used in the kernel. These flags are
 * invisible to userspace; this means that the order of these flags can
 * change during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
/**
 * struct net_device - The DEVICE structure.
 *
 * Actually, this whole structure is a big mistake. It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * @name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file). It is the name
 *		of the interface.
 *
 * @name_hlist:	Device name hash chain, please keep it close to name[]
 * @ifalias:	SNMP alias
 * @mem_end:	Shared memory end
 * @mem_start:	Shared memory start
 * @base_addr:	Device I/O address
 * @irq:	Device IRQ number
 *
 * @state:	Generic network queuing layer state, see netdev_state_t
 * @dev_list:	The global list of network devices
 * @napi_list:	List entry used for polling NAPI devices
 * @unreg_list:	List entry when we are unregistering the
 *		device; see the function unregister_netdev
 * @close_list:	List entry used when we are closing the device
 * @ptype_all:	Device-specific packet handlers for all protocols
 * @ptype_specific: Device-specific, protocol-specific packet handlers
 *
 * @adj_list:	Directly linked devices, like slaves for bonding
 * @features:	Currently active device features
 * @hw_features: User-changeable features
 *
 * @wanted_features: User-requested features
 * @vlan_features: Mask of features inheritable by VLAN devices
 *
 * @hw_enc_features: Mask of features inherited by encapsulating devices.
 *		This field indicates what encapsulation
 *		offloads the hardware is capable of doing,
 *		and drivers will need to set them appropriately.
 *
 * @mpls_features: Mask of features inheritable by MPLS
 *
 * @ifindex:	interface index
 * @group:	The group the device belongs to
 *
 * @stats:	Statistics struct, which was left as a legacy, use
 *		rtnl_link_stats64 instead
 *
 * @rx_dropped:	Dropped packets by core network,
 *		do not use this in drivers
 * @tx_dropped:	Dropped packets by core network,
 *		do not use this in drivers
 * @rx_nohandler: nohandler dropped packets by core network on
 *		inactive devices, do not use this in drivers
 * @carrier_up_count: Number of times the carrier has been up
 * @carrier_down_count: Number of times the carrier has been down
 *
 * @wireless_handlers: List of functions to handle Wireless Extensions,
 *		instead of ioctl,
 *		see <net/iw_handler.h> for details.
 * @wireless_data: Instance data managed by the core of wireless extensions
 *
 * @netdev_ops:	Includes several pointers to callbacks,
 *		if one wants to override the ndo_*() functions
 * @ethtool_ops: Management operations
 * @ndisc_ops:	Includes callbacks for different IPv6 neighbour
 *		discovery handling. Necessary for e.g. 6LoWPAN.
 * @header_ops:	Includes callbacks for creating, parsing, caching, etc.
 *		of Layer 2 headers.
 *
 * @flags:	Interface flags (a la BSD)
 * @priv_flags:	Like 'flags' but invisible to userspace,
 *		see if.h for the definitions
 * @gflags:	Global flags (kept as legacy)
 * @padded:	How much padding added by alloc_netdev()
 * @operstate:	RFC2863 operstate
 * @link_mode:	Mapping policy to operstate
 * @if_port:	Selectable AUI, TP, ...
 * @dma:	DMA channel
 * @mtu:	Interface MTU value
 * @min_mtu:	Interface Minimum MTU value
 * @max_mtu:	Interface Maximum MTU value
 * @type:	Interface hardware type
 * @hard_header_len: Maximum hardware header length.
 * @min_header_len: Minimum hardware header length
 *
 * @needed_headroom: Extra headroom the hardware may need, but not in all
 *		cases can this be guaranteed
 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
 *		cases can this be guaranteed. Some cases also use
 *		LL_MAX_HEADER instead to allocate the skb
 *
 * interface address info:
 *
 * @perm_addr:	Permanent hw address
 * @addr_assign_type: Hw address assignment type
 * @addr_len:	Hardware address length
 * @neigh_priv_len: Used in neigh_alloc()
 * @dev_id:	Used to differentiate devices that share
 *		the same link layer address
 * @dev_port:	Used to differentiate devices that share
 *		the same function
 * @addr_list_lock: XXX: need comments on this one
 * @uc_promisc:	Counter that indicates promiscuous mode
 *		has been enabled due to the need to listen to
 *		additional unicast addresses in a device that
 *		does not implement ndo_set_rx_mode()
 * @uc:		unicast mac addresses
 * @mc:		multicast mac addresses
 * @dev_addrs:	list of device hw addresses
 * @queues_kset: Group of all Kobjects in the Tx and RX queues
 * @promiscuity: Number of times the NIC is told to work in
 *		promiscuous mode; if it becomes 0 the NIC will
 *		exit promiscuous mode
 * @allmulti:	Counter, enables or disables allmulticast mode
 *
 * @vlan_info:	VLAN info
 * @dsa_ptr:	dsa specific data
 * @tipc_ptr:	TIPC specific data
 * @atalk_ptr:	AppleTalk link
 * @ip_ptr:	IPv4 specific data
 * @dn_ptr:	DECnet specific data
 * @ip6_ptr:	IPv6 specific data
 * @ax25_ptr:	AX.25 specific data
 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
 *
 * @dev_addr:	Hw address (before bcast,
 *		because most packets are unicast)
 *
 * @_rx:	Array of RX queues
 * @num_rx_queues: Number of RX queues
 *		allocated at register_netdev() time
 * @real_num_rx_queues: Number of RX queues currently active in device
 *
 * @rx_handler:	handler for received packets
 * @rx_handler_data: XXX: need comments on this one
 * @miniq_ingress: ingress/clsact qdisc specific data for
 *		ingress processing
 * @ingress_queue: XXX: need comments on this one
 * @broadcast:	hw bcast address
 *
 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
 *		indexed by RX queue number. Assigned by driver.
 *		This must only be set if the ndo_rx_flow_steer
 *		operation is defined
 * @index_hlist: Device index hash chain
 *
 * @_tx:	Array of TX queues
 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
 * @real_num_tx_queues: Number of TX queues currently active in device
 * @qdisc:	Root qdisc from userspace point of view
 * @tx_queue_len: Max frames per queue allowed
 * @tx_global_lock: XXX: need comments on this one
 *
 * @xps_maps:	XXX: need comments on this one
 * @miniq_egress: clsact qdisc specific data for
 *		egress processing
 * @watchdog_timeo: Represents the timeout that is used by
 *		the watchdog (see dev_watchdog())
 * @watchdog_timer: List of timers
 *
 * @pcpu_refcnt: Number of references to this device
 * @todo_list:	Delayed register/unregister
 * @link_watch_list: XXX: need comments on this one
 *
 * @reg_state:	Register/unregister state machine
 * @dismantle:	Device is going to be freed
 * @rtnl_link_state: This enum represents the phases of creating
 *		a new link
 *
 * @needs_free_netdev: Should unregister perform free_netdev?
 * @priv_destructor: Called from unregister
 * @npinfo:	XXX: need comments on this one
 * @nd_net:	Network namespace this network device is inside
 *
 * @ml_priv:	Mid-layer private
 * @lstats:	Loopback statistics
 * @tstats:	Tunnel statistics
 * @dstats:	Dummy statistics
 * @vstats:	Virtual ethernet statistics
 *
 * @garp_port:	GARP
 * @mrp_port:	MRP
 *
 * @dev:	Class/net/name entry
 * @sysfs_groups: Space for optional device, statistics and wireless
 *		sysfs groups
 *
 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
 * @rtnl_link_ops: Rtnl_link_ops
 *
 * @gso_max_size: Maximum size of generic segmentation offload
 * @gso_max_segs: Maximum number of segments that can be passed to the
 *		NIC for GSO
 *
 * @dcbnl_ops:	Data Center Bridging netlink ops
 * @num_tc:	Number of traffic classes in the net device
 * @tc_to_txq:	XXX: need comments on this one
 * @prio_tc_map: XXX: need comments on this one
 *
 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
 *
 * @priomap:	XXX: need comments on this one
 * @phydev:	Physical device may attach itself
 *		for hardware timestamping
 * @sfp_bus:	attached &struct sfp_bus structure.
 *
 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
 *
 * @proto_down:	protocol port state information can be sent to the
 *		switch driver and used to set the phys state of the
 *		switch port.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */
struct net_device {
	char			name[IFNAMSIZ];
	struct hlist_node	name_hlist;
	struct dev_ifalias	__rcu *ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	/*
	 *	Some hardware also needs these fields (state,dev_list,
	 *	napi_list,unreg_list,close_list) but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;
	netdev_features_t	gso_partial_features;

	int			ifindex;
	int			group;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;
	atomic_long_t		rx_nohandler;

	/* Stats to monitor link on/off, flapping */
	atomic_t		carrier_up_count;
	atomic_t		carrier_down_count;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
	const struct switchdev_ops *switchdev_ops;
#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops *ndisc_ops;
#endif
#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
#endif
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct tlsdev_ops *tlsdev_ops;
#endif

	const struct header_ops *header_ops;

	unsigned int		flags;
	unsigned int		priv_flags;

	unsigned short		gflags;
	unsigned short		padded;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned int		mtu;
	unsigned int		min_mtu;
	unsigned int		max_mtu;
	unsigned short		type;
	unsigned short		hard_header_len;
	unsigned char		min_header_len;

	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	spinlock_t		addr_list_lock;
	unsigned char		name_assign_type;
	bool			uc_promisc;
	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;

	/* Protocol-specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port		*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
	void			*atalk_ptr;
#endif
	struct in_device __rcu	*ip_ptr;
#if IS_ENABLED(CONFIG_DECNET)
	struct dn_dev __rcu	*dn_ptr;
#endif
	struct inet6_dev __rcu	*ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
	void			*ax25_ptr;
#endif
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;

	struct netdev_rx_queue	*_rx;
	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;

	struct bpf_prog __rcu	*xdp_prog;
	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_ingress;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE	(qdisc_hash, 4);
#endif
	unsigned int		tx_queue_len;
	spinlock_t		tx_global_lock;
	int			watchdog_timeo;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_egress;
#endif

	/* These may be needed for future network-power-down code. */
	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;
	struct list_head	todo_list;

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	bool needs_free_netdev;
	void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;

	/* mid-layer private */
	union {
		void					*ml_priv;
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
		struct pcpu_vstats __percpu		*vstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu	*garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu	*mrp_port;
#endif

	struct device		dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8			num_tc;
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;
	struct sfp_bus		*sfp_bus;
	struct lock_class_key	*qdisc_tx_busylock;
	struct lock_class_key	*qdisc_running_key;
	bool			proto_down;
};
#define	to_net_dev(d) container_of(d, struct net_device, dev)

static inline bool netif_elide_gro(const struct net_device *dev)
{
	if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
		return true;
	return false;
}

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
void netdev_reset_tc(struct net_device *dev);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
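
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * carving two traffic classes out of an 8-TX-queue device, four queues
 * per class. The queue counts are made-up values.
 */
#if 0	/* example only, kept out of the build */
	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* tc 0 -> queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* tc 1 -> queues 4-7 */
#endif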
static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
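
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * a per-queue callback of the shape consumed by netdev_for_each_tx_queue()
 * above. foo_stop_queue is hypothetical.
 */
#if 0	/* example only, kept out of the build */
static void foo_stop_queue(struct net_device *dev,
			   struct netdev_queue *txq, void *arg)
{
	netif_tx_stop_queue(txq);
}

/* netdev_for_each_tx_queue(dev, foo_stop_queue, NULL); */
#endif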
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_running_key;		\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	(dev)->qdisc_running_key = &qdisc_running_key;		\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);

/* returns the headroom that the master device needs to take into account
 * when forwarding to this dev
 */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

/**
 * netdev_priv - access network device private data
 * @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
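
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * drivers reserve private space via alloc_etherdev(sizeof(struct foo_priv))
 * (or alloc_netdev()) and recover it with netdev_priv(). struct foo_priv
 * and foo_link_up are hypothetical.
 */
#if 0	/* example only, kept out of the build */
struct foo_priv {
	spinlock_t lock;
	bool link_up;
};

static void foo_link_up(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->link_up = true;
	spin_unlock(&priv->lock);
}
#endif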
/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, a symlink to that device is created
 * during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value
 */
#define NAPI_POLL_WEIGHT 64

/**
 * netif_napi_add - initialize a NAPI context
 * @dev: network device
 * @napi: NAPI context
 * @poll: polling function
 * @weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 * netif_tx_napi_add - initialize a NAPI context
 * @dev: network device
 * @napi: NAPI context
 * @poll: polling function
 * @weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * This avoids adding the context to napi_hash[] and thus polluting that
 * hash table.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}

/**
 * netif_napi_del - remove a NAPI context
 * @napi: NAPI context
 *
 * netif_napi_del() removes a NAPI context from the network device NAPI list
 */
void netif_napi_del(struct napi_struct *napi);
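
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * the usual shape of a NAPI poll callback and its registration at probe
 * time. foo_poll and priv->napi are hypothetical names.
 */
#if 0	/* example only, kept out of the build */
static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' received packets here ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

/* at probe time: */
/* netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT); */
#endif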
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in tunnel GRO receive */
	u8	encap_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* Used in GRE, set in fou/gue_gro_receive */
	u8	is_fou:1;

	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

	/* Number of gro_receive callbacks this packet already went through */
	u8	recursion_counter:4;

	/* 1 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
						struct sk_buff **head,
						struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
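
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * how an encapsulation protocol guards a nested gro_receive call with
 * call_gro_receive() so deep tunnel stacks hit GRO_RECURSION_LIMIT
 * instead of recursing unboundedly. Both handler names are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static struct sk_buff **outer_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb)
{
	/* ... parse the outer header, advance the GRO offset ... */
	return call_gro_receive(inner_gro_receive, head, skb);
}
#endif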
typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
					     struct sk_buff *);
static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
						   struct sock *sk,
						   struct sk_buff **head,
						   struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func)(struct sk_buff *,
					struct net_device *,
					struct packet_type *,
					struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

/* often modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
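
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * allocating per-CPU SW stats and the u64_stats_update_begin()/end()
 * pattern the syncp member exists for. Assumes a device using the
 * tstats member of the mid-layer union in struct net_device.
 */
#if 0	/* example only, kept out of the build */
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	/* later, on the RX path: */
	{
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	}
#endif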
enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
};

struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
 * and the rtnetlink notification exclusion list in rtnetlink_event() when
 * adding new types.
 */
enum netdev_cmd {
	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
	NETDEV_DOWN,
	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this e.g. to kick tcp sessions
				   once done */
	NETDEV_CHANGE,		/* Notify device state change */
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
	NETDEV_CHANGEADDR,
	NETDEV_GOING_DOWN,
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* is lower dev state */
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
	info->extack = NULL;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
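
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * a netdevice notifier. The handler is invoked for every event in enum
 * netdev_cmd; netdev_notifier_info_to_dev() recovers the device from
 * the opaque pointer. foo_* names are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_netdev_event,
};

/* register_netdevice_notifier(&foo_nb); */
#endif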
extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
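
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * walking the per-namespace device list. for_each_netdev() requires the
 * RTNL (or dev_base_lock); the _rcu variant only requires an RCU
 * read-side critical section, as shown here.
 */
#if 0	/* example only, kept out of the build */
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
#endif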
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
				      unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
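
/*
 * Editor's note - illustrative sketch, not part of the original header:
 * name lookup and reference counting. dev_get_by_name() takes a
 * reference that must be dropped with dev_put(); the _rcu and __
 * (RTNL-protected) variants take none.
 */
#if 0	/* example only, kept out of the build */
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... use dev ... */
		dev_put(dev);
	}
#endif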
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

DECLARE_PER_CPU(int, xmit_recursion);
#define XMIT_RECURSION_LIMIT	10

static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}
  2333. static inline void *skb_gro_network_header(struct sk_buff *skb)
  2334. {
  2335. return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
  2336. skb_network_offset(skb);
  2337. }
  2338. static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
  2339. const void *start, unsigned int len)
  2340. {
  2341. if (NAPI_GRO_CB(skb)->csum_valid)
  2342. NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
  2343. csum_partial(start, len, 0));
  2344. }
/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
        return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
                                                      bool zero_okay,
                                                      __sum16 check)
{
        return ((skb->ip_summed != CHECKSUM_PARTIAL ||
                 skb_checksum_start_offset(skb) <
                 skb_gro_offset(skb)) &&
                !skb_at_gro_remcsum_start(skb) &&
                NAPI_GRO_CB(skb)->csum_cnt == 0 &&
                (!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
                                                           __wsum psum)
{
        if (NAPI_GRO_CB(skb)->csum_valid &&
            !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
                return 0;

        NAPI_GRO_CB(skb)->csum = psum;

        return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
        if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
                /* Consume a checksum from CHECKSUM_UNNECESSARY */
                NAPI_GRO_CB(skb)->csum_cnt--;
        } else {
                /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
                 * verified a new top-level checksum or an encapsulated one
                 * during GRO. This saves work if we fall back to the normal
                 * path.
                 */
                __skb_incr_checksum_unnecessary(skb);
        }
}
#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,      \
                                    compute_pseudo)                    \
({                                                                     \
        __sum16 __ret = 0;                                             \
        if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
                __ret = __skb_gro_checksum_validate_complete(skb,      \
                                compute_pseudo(skb, proto));           \
        if (!__ret)                                                    \
                skb_gro_incr_csum_unnecessary(skb);                    \
        __ret;                                                         \
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)          \
        __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,        \
                                             compute_pseudo)           \
        __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)                          \
        __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
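/*
 * Example (editor's sketch, not taken from the kernel tree): a protocol's
 * ->gro_receive() callback typically validates the checksum before trying
 * to merge the packet. A hypothetical encapsulation protocol carrying a
 * plain Internet checksum with no pseudo-header could use the simple form:
 *
 *      static struct sk_buff **myproto_gro_receive(struct sk_buff **head,
 *                                                  struct sk_buff *skb)
 *      {
 *              // Flush this skb out of GRO if the checksum can be neither
 *              // consumed from CHECKSUM_UNNECESSARY/csum_cnt nor computed.
 *              if (skb_gro_checksum_simple_validate(skb))
 *                      goto flush;
 *              ...
 *      flush:
 *              NAPI_GRO_CB(skb)->flush = 1;
 *              return NULL;
 *      }
 *
 * Protocols with a pseudo-header (e.g. TCP/UDP) pass a compute_pseudo()
 * helper via skb_gro_checksum_validate() instead.
 */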
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
        return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
                !NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
                                              __sum16 check, __wsum pseudo)
{
        NAPI_GRO_CB(skb)->csum = ~pseudo;
        NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
do {                                                                    \
        if (__skb_gro_checksum_convert_check(skb))                      \
                __skb_gro_checksum_convert(skb, check,                  \
                                           compute_pseudo(skb, proto)); \
} while (0)

struct gro_remcsum {
        int offset;
        __wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
        grc->offset = 0;
        grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
                                            unsigned int off, size_t hdrlen,
                                            int start, int offset,
                                            struct gro_remcsum *grc,
                                            bool nopartial)
{
        __wsum delta;
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

        if (!nopartial) {
                NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
                return ptr;
        }

        ptr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, off + plen)) {
                ptr = skb_gro_header_slow(skb, off + plen, off);
                if (!ptr)
                        return NULL;
        }

        delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
                               start, offset);

        /* Adjust skb->csum since we changed the packet */
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

        grc->offset = off + hdrlen + offset;
        grc->delta = delta;

        return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
                                           struct gro_remcsum *grc)
{
        void *ptr;
        size_t plen = grc->offset + sizeof(u16);

        if (!grc->delta)
                return;

        ptr = skb_gro_header_fast(skb, grc->offset);
        if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
                ptr = skb_gro_header_slow(skb, plen, grc->offset);
                if (!ptr)
                        return;
        }

        remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
{
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
{
        NAPI_GRO_CB(skb)->flush |= flush;
}
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned int len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;
        return dev->header_ops->parse(skb, haddr);
}

/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
                                       char *ll_header, int len)
{
        if (likely(len >= dev->hard_header_len))
                return true;
        if (len < dev->min_header_len)
                return false;

        if (capable(CAP_SYS_RAWIO)) {
                memset(ll_header + len, 0, dev->hard_header_len - len);
                return true;
        }

        if (dev->header_ops && dev->header_ops->validate)
                return dev->header_ops->validate(ll_header, len);

        return false;
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr,
                           int len, int size);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY      (1 << 7)  /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
        u64                     count;
        unsigned int            num_buckets;
        unsigned int            history_head;
        u16                     history[FLOW_LIMIT_HISTORY];
        u8                      buckets[];
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
/*
 * Incoming packets are placed on per-CPU queues
 */
struct softnet_data {
        struct list_head        poll_list;
        struct sk_buff_head     process_queue;

        /* stats */
        unsigned int            processed;
        unsigned int            time_squeeze;
        unsigned int            received_rps;
#ifdef CONFIG_RPS
        struct softnet_data     *rps_ipi_list;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
        struct sd_flow_limit __rcu *flow_limit;
#endif
        struct Qdisc            *output_queue;
        struct Qdisc            **output_queue_tailp;
        struct sk_buff          *completion_queue;
#ifdef CONFIG_XFRM_OFFLOAD
        struct sk_buff_head     xfrm_backlog;
#endif
#ifdef CONFIG_RPS
        /* input_queue_head should be written by cpu owning this struct,
         * and only read by other cpus. Worth using a cache line.
         */
        unsigned int            input_queue_head ____cacheline_aligned_in_smp;

        /* Elements below can be accessed between CPUs for RPS/RFS */
        call_single_data_t      csd ____cacheline_aligned_in_smp;
        struct softnet_data     *rps_ipi_next;
        unsigned int            cpu;
        unsigned int            input_queue_tail;
#endif
        unsigned int            dropped;
        struct sk_buff_head     input_pkt_queue;
        struct napi_struct      backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
                                              unsigned int *qtail)
{
#ifdef CONFIG_RPS
        *qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
        clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_start_queue(txq);
        }
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
        netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_wake_queue(txq);
        }
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop transmitting packets
 * @dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
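/*
 * Example (editor's sketch, not from the kernel tree): the classic flow
 * control pattern in a driver. In a hypothetical my_start_xmit(), stop the
 * queue once the TX ring can no longer hold a worst-case packet, and wake
 * it again from the completion path once descriptors are reclaimed:
 *
 *      static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *                                       struct net_device *dev)
 *      {
 *              struct my_priv *priv = netdev_priv(dev);
 *
 *              my_post_descriptors(priv, skb);
 *              if (my_tx_ring_space(priv) < MAX_SKB_FRAGS + 1)
 *                      netif_stop_queue(dev);
 *              return NETDEV_TX_OK;
 *      }
 *
 *      // In the TX completion handler:
 *      if (netif_queue_stopped(dev) &&
 *          my_tx_ring_space(priv) >= MAX_SKB_FRAGS + 1)
 *              netif_wake_queue(dev);
 *
 * my_priv, my_post_descriptors() and my_tx_ring_space() are placeholders;
 * multiqueue drivers use the netif_tx_{stop,wake}_queue() variants on the
 * per-queue struct netdev_queue instead.
 */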
void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_queue_stopped - test if transmit queue is flow blocked
 * @dev: network device
 *
 * Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
        return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
        return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
        return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL-enabled drivers might use this helper in their ndo_start_xmit(),
 * to give an appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
        prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL-enabled drivers might use this helper in their TX completion path,
 * to give an appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
        prefetchw(&dev_queue->dql.limit);
#endif
}
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
                                        unsigned int bytes)
{
#ifdef CONFIG_BQL
        dql_queued(&dev_queue->dql, bytes);

        if (likely(dql_avail(&dev_queue->dql) >= 0))
                return;

        set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

        /*
         * The XOFF flag must be set before checking the dql_avail below,
         * because in netdev_tx_completed_queue we update the dql_completed
         * before checking the XOFF flag.
         */
        smp_mb();

        /* check again in case another CPU has just made room avail */
        if (unlikely(dql_avail(&dev_queue->dql) >= 0))
                clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 * netdev_sent_queue - report the number of bytes queued to hardware
 * @dev: network device
 * @bytes: number of bytes queued to the hardware device queue
 *
 * Report the number of bytes queued for sending/completion to the network
 * device hardware queue. @bytes should be a good approximation and should
 * exactly match the @bytes amount passed to netdev_completed_queue().
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
                                             unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
        if (unlikely(!bytes))
                return;

        dql_completed(&dev_queue->dql, bytes);

        /*
         * Without the memory barrier there is a small possibility that
         * netdev_tx_sent_queue will miss the update and cause the queue to
         * be stopped forever
         */
        smp_mb();

        if (dql_avail(&dev_queue->dql) < 0)
                return;

        if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
                netif_schedule_queue(dev_queue);
#endif
}

/**
 * netdev_completed_queue - report bytes and packets completed by device
 * @dev: network device
 * @pkts: actual number of packets sent over the medium
 * @bytes: actual number of bytes sent over the medium
 *
 * Report the number of bytes and packets transmitted by the network device
 * hardware queue over the physical medium. @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue().
 */
static inline void netdev_completed_queue(struct net_device *dev,
                                          unsigned int pkts, unsigned int bytes)
{
        netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
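/*
 * Example (editor's sketch, not from the kernel tree): the two halves of
 * BQL accounting in a hypothetical single-queue driver. Queued bytes are
 * reported at transmit time and completed bytes at reclaim time; the byte
 * totals fed to the two calls must match one another:
 *
 *      // In ndo_start_xmit(), after the packet is posted to the ring:
 *      netdev_sent_queue(dev, skb->len);
 *
 *      // In the TX completion path, after reclaiming descriptors:
 *      unsigned int pkts = 0, bytes = 0;
 *
 *      while ((skb = my_reclaim_completed_skb(priv)) != NULL) {
 *              pkts++;
 *              bytes += skb->len;
 *              dev_consume_skb_any(skb);
 *      }
 *      netdev_completed_queue(dev, pkts, bytes);
 *
 * my_reclaim_completed_skb() is a placeholder for the driver's ring
 * cleanup; multiqueue drivers call netdev_tx_{sent,completed}_queue() on
 * each struct netdev_queue instead.
 */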
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
        clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
        dql_reset(&q->dql);
#endif
}

/**
 * netdev_reset_queue - reset the packets and bytes count of a network device
 * @dev_queue: network device
 *
 * Reset the bytes and packet count of a network device and clear the
 * software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
        netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * @dev: network device
 * @queue_index: given tx queue index
 *
 * Returns 0 if given tx queue index >= number of device tx queues,
 * otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
                net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
                                     dev->name, queue_index,
                                     dev->real_num_tx_queues);
                return 0;
        }

        return queue_index;
}
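/*
 * Example (editor's sketch, not from the kernel tree): a driver that
 * implements its own ndo_select_queue() should clamp whatever index it
 * derives, since the stack only guarantees validity for its own choices:
 *
 *      static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
 *                                 void *accel_priv,
 *                                 select_queue_fallback_t fallback)
 *      {
 *              u16 qid = my_hash_to_queue(skb);        // driver-specific
 *
 *              return netdev_cap_txqueue(dev, qid);
 *      }
 *
 * my_hash_to_queue() is a placeholder for the driver's mapping policy.
 */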
/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device. We only need start,
 * stop, and a check if it's stopped. All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        netif_tx_start_queue(txq);
}

/**
 * netif_stop_subqueue - stop sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
        netif_tx_stop_queue(txq);
}

/**
 * netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
                                            u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
                                          struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 * netif_wake_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        netif_tx_wake_queue(txq);
}

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
                        u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
                                      const struct cpumask *mask,
                                      u16 index)
{
        return 0;
}
#endif

/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
        return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                               unsigned int rxq)
{
        return 0;
}
#endif
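/*
 * Example (editor's sketch, not from the kernel tree): a driver allocated
 * with room for the maximum number of queues can shrink the active set at
 * probe time, once it knows how many queues the hardware actually enabled:
 *
 *      err = netif_set_real_num_tx_queues(dev, hw_txq_count);
 *      if (err)
 *              goto err_out;
 *      err = netif_set_real_num_rx_queues(dev, hw_rxq_count);
 *      if (err)
 *              goto err_out;
 *
 * hw_txq_count/hw_rxq_count are placeholders for probed hardware limits;
 * both must be <= the txqs/rxqs counts the device was allocated with
 * (see alloc_netdev_mqs() below).
 */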
static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
{
        return dev->_rx + rxq;
}

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
                struct netdev_rx_queue *queue)
{
        struct net_device *dev = queue->dev;
        int index = queue - dev->_rx;

        BUG_ON(index >= dev->num_rx_queues);
        return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES      (8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
        SKB_REASON_CONSUMED,
        SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in the following contexts:
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 * replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 * Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 * replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 * and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
        __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
        __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
        __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
        __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
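/*
 * Example (editor's sketch): picking the right helper matters mostly for
 * tracing and drop monitoring. A successfully transmitted packet freed
 * from a completion handler is "consumed", while a packet discarded on an
 * error path is "dropped":
 *
 *      // TX completion (may run in hard irq, softirq or process context):
 *      dev_consume_skb_any(skb);
 *
 *      // Error path in ndo_start_xmit(), e.g. a malformed skb:
 *      dev_kfree_skb_any(skb);
 *      return NETDEV_TX_OK;
 *
 * The _any() variants are the safe default when the calling context is
 * not fixed; the _irq() variants skip a context check when the caller
 * knows it is in irq context.
 */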
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
        kfree_skb(napi->skb);
        napi->skb = NULL;
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
              bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
                        unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
                           char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                      int fd, u32 flags);
void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
                     struct netdev_bpf *xdp);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
                        const struct sk_buff *skb);

static __always_inline int ____dev_forward_skb(struct net_device *dev,
                                               struct sk_buff *skb)
{
        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
            unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        skb_scrub_packet(skb, true);
        skb->priority = 0;
        return 0;
}
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern int              netdev_budget;
extern unsigned int     netdev_budget_usecs;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        this_cpu_inc(*dev->pcpu_refcnt);
}
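/*
 * Example (editor's sketch): the hold/put pair brackets any use of a
 * net_device pointer that outlives the RCU read side it was found under:
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              dev_hold(dev);          // pin it past rcu_read_unlock()
 *      rcu_read_unlock();
 *
 *      if (dev) {
 *              ... use dev ...
 *              dev_put(dev);           // drop the reference when done
 *      }
 *
 * dev_get_by_index() performs the lookup-and-hold internally; the RCU
 * variant above is useful when the caller may not need the reference.
 */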
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller who is
 * responsible for serialization of these calls.
 *
 * The name "carrier" is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
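/*
 * Example (editor's sketch): a typical link-change handler mirrors the
 * PHY/MAC link state into the carrier state so the stack and userspace
 * (operstate, "ip link") see it:
 *
 *      static void my_link_change(struct my_priv *priv)
 *      {
 *              struct net_device *dev = priv->dev;
 *
 *              if (my_phy_link_up(priv)) {
 *                      netif_carrier_on(dev);
 *                      netif_wake_queue(dev);  // resume TX if stopped
 *              } else {
 *                      netif_carrier_off(dev);
 *              }
 *      }
 *
 * my_priv and my_phy_link_up() are placeholders; both carrier calls are
 * safe from IRQ context, and serialization is the caller's job (see the
 * comment above).
 */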
/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
        NETIF_MSG_DRV           = 0x0001,
        NETIF_MSG_PROBE         = 0x0002,
        NETIF_MSG_LINK          = 0x0004,
        NETIF_MSG_TIMER         = 0x0008,
        NETIF_MSG_IFDOWN        = 0x0010,
        NETIF_MSG_IFUP          = 0x0020,
        NETIF_MSG_RX_ERR        = 0x0040,
        NETIF_MSG_TX_ERR        = 0x0080,
        NETIF_MSG_TX_QUEUED     = 0x0100,
        NETIF_MSG_INTR          = 0x0200,
        NETIF_MSG_TX_DONE       = 0x0400,
        NETIF_MSG_RX_STATUS     = 0x0800,
        NETIF_MSG_PKTDATA       = 0x1000,
        NETIF_MSG_HW            = 0x2000,
        NETIF_MSG_WOL           = 0x4000,
};

#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        /* use default */
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)   /* no output */
                return 0;
        /* set low N bits */
        return (1 << debug_value) - 1;
}
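/*
 * Example (editor's sketch): drivers typically take a "debug" module
 * parameter, convert it once at probe time, and then gate their log
 * statements on the per-category netif_msg_*() tests:
 *
 *      static int debug = -1;          // -1 means "use the defaults below"
 *      module_param(debug, int, 0644);
 *
 *      priv->msg_enable = netif_msg_init(debug,
 *                                        NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *                                        NETIF_MSG_LINK);
 *      ...
 *      if (netif_msg_link(priv))
 *              netdev_info(dev, "link is up\n");
 *
 * priv is assumed to embed a u32 msg_enable field, which is what the
 * netif_msg_*() macros expect of their argument.
 */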
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
        spin_lock(&txq->_xmit_lock);
        txq->xmit_lock_owner = cpu;
}

static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
        __acquire(&txq->_xmit_lock);
        return true;
}

static inline void __netif_tx_release(struct netdev_queue *txq)
{
        __release(&txq->_xmit_lock);
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
        spin_lock_bh(&txq->_xmit_lock);
        txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
        bool ok = spin_trylock(&txq->_xmit_lock);

        if (likely(ok))
                txq->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
        if (txq->xmit_lock_owner != -1)
                txq->trans_start = jiffies;
}

/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
static inline void netif_trans_update(struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (txq->trans_start != jiffies)
                txq->trans_start = jiffies;
}

/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        spin_lock(&dev->tx_global_lock);
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* We are the only thread of execution doing a
                 * freeze, but we have to grab the _xmit_lock in
                 * order to synchronize with threads which are in
                 * the ->hard_start_xmit() handler and already
                 * checked the frozen bit.
                 */
                __netif_tx_lock(txq, cpu);
                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
                __netif_tx_unlock(txq);
        }
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
        local_bh_disable();
        netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* No need to grab the _xmit_lock here. If the
                 * queue is not stopped for another reason, we
                 * force a schedule.
                 */
                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
                netif_schedule_queue(txq);
        }
        spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        netif_tx_unlock(dev);
        local_bh_enable();
}
#define HARD_TX_LOCK(dev, txq, cpu) {                   \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(txq, cpu);              \
        } else {                                        \
                __netif_tx_acquire(txq);                \
        }                                               \
}

#define HARD_TX_TRYLOCK(dev, txq)                       \
        (((dev->features & NETIF_F_LLTX) == 0) ?        \
                __netif_tx_trylock(txq) :               \
                __netif_tx_acquire(txq))

#define HARD_TX_UNLOCK(dev, txq) {                      \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
        } else {                                        \
                __netif_tx_release(txq);                \
        }                                               \
}

static inline void netif_tx_disable(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                __netif_tx_lock(txq, cpu);
                netif_tx_stop_queue(txq);
                __netif_tx_unlock(txq);
        }
        local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
        spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
        int subclass = SINGLE_DEPTH_NESTING;

        if (dev->netdev_ops->ndo_get_lock_subclass)
                subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

        spin_lock_nested(&dev->addr_list_lock, subclass);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
        spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
        spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
        list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                    unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
int dev_get_valid_name(struct net *net, struct net_device *dev,
                       const char *name);

#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
                         count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
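/*
 * Example (editor's sketch): the canonical allocate/register/teardown
 * sequence for an Ethernet-style driver with a private struct:
 *
 *      struct net_device *dev;
 *      struct my_priv *priv;
 *
 *      dev = alloc_netdev(sizeof(*priv), "myeth%d", NET_NAME_UNKNOWN,
 *                         ether_setup);
 *      if (!dev)
 *              return -ENOMEM;
 *      priv = netdev_priv(dev);
 *      dev->netdev_ops = &my_netdev_ops;
 *
 *      err = register_netdev(dev);     // takes and drops rtnl_lock itself
 *      if (err) {
 *              free_netdev(dev);
 *              return err;
 *      }
 *
 *      // teardown:
 *      unregister_netdev(dev);
 *      free_netdev(dev);
 *
 * my_priv and my_netdev_ops are placeholders. register_netdevice() is the
 * variant for callers that already hold rtnl_lock.
 */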
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
                       struct net_device *dev,
                       int (*sync)(struct net_device *, const unsigned char *),
                       int (*unsync)(struct net_device *,
                                     const unsigned char *));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
                          struct net_device *dev,
                          int (*unsync)(struct net_device *,
                                        const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 * __dev_uc_sync - Synchronize device's unicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
                                int (*sync)(struct net_device *,
                                            const unsigned char *),
                                int (*unsync)(struct net_device *,
                                              const unsigned char *))
{
        return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 * __dev_uc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
                                   int (*unsync)(struct net_device *,
                                                 const unsigned char *))
{
        __hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 * __dev_mc_sync - Synchronize device's multicast list
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * Add newly added addresses to the interface, and release
 * addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
                                int (*sync)(struct net_device *,
                                            const unsigned char *),
                                int (*unsync)(struct net_device *,
                                              const unsigned char *))
{
        return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 * __dev_mc_unsync - Remove synchronized addresses from device
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
                                   int (*unsync)(struct net_device *,
                                                 const unsigned char *))
{
        __hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
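/*
 * Example (editor's sketch): a driver's ndo_set_rx_mode() commonly pushes
 * the stack's unicast/multicast lists down to hardware filters via the
 * sync helpers, providing per-address add/remove callbacks:
 *
 *      static int my_sync_addr(struct net_device *dev,
 *                              const unsigned char *addr)
 *      {
 *              return my_hw_filter_add(netdev_priv(dev), addr);
 *      }
 *
 *      static int my_unsync_addr(struct net_device *dev,
 *                                const unsigned char *addr)
 *      {
 *              return my_hw_filter_del(netdev_priv(dev), addr);
 *      }
 *
 *      static void my_set_rx_mode(struct net_device *dev)
 *      {
 *              __dev_uc_sync(dev, my_sync_addr, my_unsync_addr);
 *              __dev_mc_sync(dev, my_sync_addr, my_unsync_addr);
 *      }
 *
 * my_hw_filter_add()/my_hw_filter_del() are placeholder hardware hooks;
 * only the delta since the last call is pushed down to the callbacks.
 */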
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);

/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);

struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats);

extern int              netdev_max_backlog;
extern int              netdev_tstamp_prequeue;
extern int              weight_p;
extern int              dev_weight_rx_bias;
extern int              dev_weight_tx_bias;
extern int              dev_rx_weight;
extern int              dev_tx_weight;

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
                                                 struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->adj_list.upper, \
             updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
             updev; \
             updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
                                  int (*fn)(struct net_device *upper_dev,
                                            void *data),
                                  void *data);

bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
                                  struct net_device *upper_dev);

bool netdev_has_any_upper_dev(struct net_device *dev);

void *netdev_lower_get_next_private(struct net_device *dev,
                                    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
                                        struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
        for (iter = (dev)->adj_list.lower.next, \
             priv = netdev_lower_get_next_private(dev, &(iter)); \
             priv; \
             priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
        for (iter = &(dev)->adj_list.lower, \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
                            struct list_head **iter);

#define netdev_for_each_lower_dev(dev, ldev, iter) \
        for (iter = (dev)->adj_list.lower.next, \
             ldev = netdev_lower_get_next(dev, &(iter)); \
             ldev; \
             ldev = netdev_lower_get_next(dev, &(iter)))

struct net_device *netdev_all_lower_get_next(struct net_device *dev,
                                             struct list_head **iter);
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
                                                 struct list_head **iter);

int netdev_walk_all_lower_dev(struct net_device *dev,
                              int (*fn)(struct net_device *lower_dev,
                                        void *data),
                              void *data);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
                                  int (*fn)(struct net_device *lower_dev,
                                            void *data),
                                  void *data);

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
                          struct netlink_ext_ack *extack);
int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev,
                                 void *upper_priv, void *upper_info,
                                 struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
                                void *lower_state_info);

/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);

int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
                            const netdev_features_t features);

struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features);

struct netdev_bonding_info {
        ifslave slave;
        ifbond  master;
};

struct netdev_notifier_bonding_info {
        struct netdev_notifier_info info; /* must be first */
        struct netdev_bonding_info  bonding_info;
};

void netdev_bonding_info_change(struct net_device *dev,
                                struct netdev_bonding_info *bonding_info);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
        return __skb_gso_segment(skb, features, true);
}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth);

static inline bool can_checksum_protocol(netdev_features_t features,
                                         __be16 protocol)
{
        if (protocol == htons(ETH_P_FCOE))
                return !!(features & NETIF_F_FCOE_CRC);

        /* Assume this is an IP checksum (not SCTP CRC) */

        if (features & NETIF_F_HW_CSUM) {
                /* Can checksum everything */
                return true;
        }

        switch (protocol) {
        case htons(ETH_P_IP):
                return !!(features & NETIF_F_IP_CSUM);
        case htons(ETH_P_IPV6):
                return !!(features & NETIF_F_IPV6_CSUM);
        default:
                return false;
        }
}
  3609. #ifdef CONFIG_BUG
  3610. void netdev_rx_csum_fault(struct net_device *dev);
  3611. #else
  3612. static inline void netdev_rx_csum_fault(struct net_device *dev)
  3613. {
  3614. }
  3615. #endif
  3616. /* rx skb timestamps */
  3617. void net_enable_timestamp(void);
  3618. void net_disable_timestamp(void);
  3619. #ifdef CONFIG_PROC_FS
  3620. int __init dev_proc_init(void);
  3621. #else
  3622. #define dev_proc_init() 0
  3623. #endif
  3624. static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
  3625. struct sk_buff *skb, struct net_device *dev,
  3626. bool more)
  3627. {
  3628. skb->xmit_more = more ? 1 : 0;
  3629. return ops->ndo_start_xmit(skb, dev);
  3630. }
  3631. static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
  3632. struct netdev_queue *txq, bool more)
  3633. {
  3634. const struct net_device_ops *ops = dev->netdev_ops;
  3635. int rc;
  3636. rc = __netdev_start_xmit(ops, skb, dev, more);
  3637. if (rc == NETDEV_TX_OK)
  3638. txq_trans_update(txq);
  3639. return rc;
  3640. }
  3641. int netdev_class_create_file_ns(const struct class_attribute *class_attr,
  3642. const void *ns);
  3643. void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
  3644. const void *ns);
  3645. static inline int netdev_class_create_file(const struct class_attribute *class_attr)
  3646. {
  3647. return netdev_class_create_file_ns(class_attr, NULL);
  3648. }
  3649. static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
  3650. {
  3651. netdev_class_remove_file_ns(class_attr, NULL);
  3652. }
  3653. extern const struct kobj_ns_type_operations net_ns_type_operations;
  3654. const char *netdev_drivername(const struct net_device *dev);
  3655. void linkwatch_run_queue(void);
  3656. static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
  3657. netdev_features_t f2)
  3658. {
  3659. if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
  3660. if (f1 & NETIF_F_HW_CSUM)
  3661. f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  3662. else
  3663. f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  3664. }
  3665. return f1 & f2;
  3666. }
  3667. static inline netdev_features_t netdev_get_wanted_features(
  3668. struct net_device *dev)
  3669. {
  3670. return (dev->features & ~dev->hw_features) | dev->wanted_features;
  3671. }
  3672. netdev_features_t netdev_increment_features(netdev_features_t all,
  3673. netdev_features_t one, netdev_features_t mask);
  3674. /* Allow TSO being used on stacked device :
  3675. * Performing the GSO segmentation before last device
  3676. * is a performance improvement.
  3677. */
  3678. static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
  3679. netdev_features_t mask)
  3680. {
  3681. return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
  3682. }
  3683. int __netdev_update_features(struct net_device *dev);
  3684. void netdev_update_features(struct net_device *dev);
  3685. void netdev_change_features(struct net_device *dev);
  3686. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  3687. struct net_device *dev);
  3688. netdev_features_t passthru_features_check(struct sk_buff *skb,
  3689. struct net_device *dev,
  3690. netdev_features_t features);
  3691. netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}
static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
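/*
 * Illustrative sketch (not part of the original header), simplified
 * from the core validate-xmit path: when netif_needs_gso() is true,
 * the stack software-segments the skb with skb_gso_segment() before
 * the device sees it.
 */
#if 0	/* example only, a fragment of a transmit path */
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs))
			goto drop;		/* cannot segment: drop */
		if (segs) {
			consume_skb(skb);	/* transmit the segments */
			skb = segs;
		}
	}
#endif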
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}
static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
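/*
 * Illustrative sketch (not part of the original header): the
 * netif_is_* predicates above let generic code classify a device's
 * role without knowing the implementing driver; example_* is a
 * hypothetical name.
 */
static inline bool example_is_enslaved_port(const struct net_device *dev)
{
	/* true for any bond slave, team port, bridge port or OVS port */
	return netif_is_lag_port(dev) || netif_is_bridge_port(dev) ||
	       netif_is_ovs_port(dev);
}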
static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
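/*
 * Illustrative sketch (not part of the original header): tunnel-style
 * drivers whose transmit path dereferences skb_dst() call this from
 * their setup routine so the stack keeps the dst attached; the ops
 * structure name is hypothetical.
 */
#if 0	/* example only */
static void example_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &example_tunnel_netdev_ops;
	netif_keep_dst(dev);	/* ndo_start_xmit() needs skb_dst() */
}
#endif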
/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return dev->priv_flags & IFF_MACSEC;
}

extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}
__printf(3, 4)
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3)
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_info(const struct net_device *dev, const char *format, ...);

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
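/*
 * Illustrative usage (not part of the original header): the *_once
 * variants suit per-device conditions that would otherwise flood the
 * log, e.g. reporting a firmware quirk a single time:
 *
 *	netdev_warn_once(dev, "firmware lacks RSS, using a single queue\n");
 */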
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif
/*
 * netdev_WARN() acts like netdev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)
/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif
/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
do {									\
	if (cond)							\
		netif_dbg(priv, type, netdev, fmt, ##args);		\
	else								\
		netif_ ## level(priv, type, netdev, fmt, ##args);	\
} while (0)
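/*
 * Illustrative usage (not part of the original header): drivers gate
 * these on a per-device msg_enable bitmap (tested via the netif_msg_*
 * helpers and typically configured through ethtool).  netif_cond_dbg()
 * keeps an expected error quiet while reporting unexpected ones at
 * full severity; priv, dev and err below are placeholders:
 *
 *	netif_err(priv, tx_err, dev, "TX ring stalled\n");
 *
 *	netif_cond_dbg(priv, hw, dev, err == -EINTR, err,
 *		       "register read failed: %d\n", err);
 */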
#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
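/*
 * Worked example (illustrative, not part of the original header):
 * hashing the low nibble of the EtherType with PTYPE_HASH_MASK gives
 *
 *	ETH_P_IP   (0x0800) -> bucket 0x0
 *	ETH_P_ARP  (0x0806) -> bucket 0x6
 *	ETH_P_IPV6 (0x86DD) -> bucket 0xD
 *
 * while RARP (0x8035), SNAP (0x0005) and X.25 (0x0805) all land in
 * bucket 0x5, the single overlap noted above.
 */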
#endif	/* _LINUX_NETDEVICE_H */