af_packet.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"
/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are silly (PPP).
   - A packet socket receives packets with the ll header pulled, so SOCK_RAW
     should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!
   data       -> data

Summary:
If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
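
/*
 * Example (illustrative, not part of the original file): the header
 * conventions above are what user space observes when opening a packet
 * socket. A SOCK_RAW packet socket delivers frames with the ll (link-level)
 * header included, while SOCK_DGRAM delivers them with the ll header
 * already pulled. A minimal sketch, following packet(7):
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/ethernet.h>	// ETH_P_ALL
 *	#include <arpa/inet.h>		// htons()
 *
 *	// Frames arrive with the Ethernet header included:
 *	int fd_raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// Frames arrive with the Ethernet header removed:
 *	int fd_dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 * Both calls require the CAP_NET_RAW capability.
 */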
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	packet_pick_tx_queue(dev, skb);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}
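
/*
 * Note (illustrative, not part of the original file): po->xmit points at
 * packet_direct_xmit() when the user has enabled the PACKET_QDISC_BYPASS
 * socket option, in which case frames skip the qdisc layer and go straight
 * to the driver. A minimal user-space sketch, following packet(7):
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * With the bypass enabled, packets that a qdisc would have queued can
 * instead be dropped when the device's TX queue is stopped, as seen in
 * packet_direct_xmit() above.
 */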
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}
static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
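
/*
 * Note (illustrative, not part of the original file): tp_status is the
 * handshake word between the kernel and the user-space mmap()ed ring.
 * The smp_wmb()/smp_rmb() pairing above orders the frame contents against
 * the status word; user space polls the same field from the other side.
 * A minimal sketch for a TPACKET_V2 RX ring consumer:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = frame;	// frame in the mmap()ed ring
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel's handoff
 *	// ... read the packet data at (char *)frame + hdr->tp_mac ...
 *	hdr->tp_status = TP_STATUS_KERNEL;	// return the frame to the kernel
 *
 * (Error handling and user-space memory barriers are omitted.)
 */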
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}
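
/*
 * Worked example (illustrative, not part of the original file): with a
 * tp_block_size of 4096 and a tp_frame_size of 2048, frames_per_block is 2.
 * Looking up frame position 5 then gives pg_vec_pos = 5 / 2 = 2 and
 * frame_offset = 5 % 2 = 1, i.e. the frame starts 2048 bytes into the third
 * block of pg_vec. The lookup only succeeds if the frame's status word
 * matches the requested status (e.g. TP_STATUS_KERNEL for a free RX frame).
 */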
static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyway
		 */
		if (ecmd.base.speed < SPEED_1000 ||
		    ecmd.base.speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = ecmd.base.speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
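
/*
 * Worked example (illustrative, not part of the original file): for a 1 MiB
 * block on a 1 Gbps link, mbits = (1048576 * 8) / (1024 * 1024) = 8,
 * div = 1000 / 1000 = 1 and msec = 1, so the computed retire timeout is
 * 8 * 1 + 1 = 9 ms: slightly more than the ~8 ms the link needs to fill the
 * block, which is exactly what the timer logic below wants.
 */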
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle and the timer
				 * fired. We don't have a block to close, so
				 * we open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * restarts the timer; thawing/timer-refresh
				 * is a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}
/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: we deliberately do NOT refresh the timer here, because
 * almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space
 *    left, it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}
static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;

	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;

	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);

	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;

	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;
	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);

	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
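
/*
 * Worked example (illustrative, not part of the original file): with a
 * 16-frame ring and ROOM_POW_OFF = 2, the helpers above probe the slot
 * 16 >> 2 = 4 positions ahead of the current head. If the frame (or block)
 * there is still owned by the kernel, at least a quarter of the ring is
 * free and the caller reports ROOM_NORMAL; with pow_off = 0 only the very
 * next slot is checked, giving the weaker ROOM_LOW.
 */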
  1068. static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
  1069. {
  1070. struct sock *sk = &po->sk;
  1071. int ret = ROOM_NONE;
  1072. if (po->prot_hook.func != tpacket_rcv) {
  1073. int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
  1074. - (skb ? skb->truesize : 0);
  1075. if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
  1076. return ROOM_NORMAL;
  1077. else if (avail > 0)
  1078. return ROOM_LOW;
  1079. else
  1080. return ROOM_NONE;
  1081. }
  1082. if (po->tp_version == TPACKET_V3) {
  1083. if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
  1084. ret = ROOM_NORMAL;
  1085. else if (__tpacket_v3_has_room(po, 0))
  1086. ret = ROOM_LOW;
  1087. } else {
  1088. if (__tpacket_has_room(po, ROOM_POW_OFF))
  1089. ret = ROOM_NORMAL;
  1090. else if (__tpacket_has_room(po, 0))
  1091. ret = ROOM_LOW;
  1092. }
  1093. return ret;
  1094. }
  1095. static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
  1096. {
  1097. int ret;
  1098. bool has_room;
  1099. spin_lock_bh(&po->sk.sk_receive_queue.lock);
  1100. ret = __packet_rcv_has_room(po, skb);
  1101. has_room = ret == ROOM_NORMAL;
  1102. if (po->pressure == has_room)
  1103. po->pressure = !has_room;
  1104. spin_unlock_bh(&po->sk.sk_receive_queue.lock);
  1105. return ret;
  1106. }
  1107. static void packet_sock_destruct(struct sock *sk)
  1108. {
  1109. skb_queue_purge(&sk->sk_error_queue);
  1110. WARN_ON(atomic_read(&sk->sk_rmem_alloc));
  1111. WARN_ON(refcount_read(&sk->sk_wmem_alloc));
  1112. if (!sock_flag(sk, SOCK_DEAD)) {
  1113. pr_err("Attempt to release alive packet socket: %p\n", sk);
  1114. return;
  1115. }
  1116. sk_refcnt_debug_dec(sk);
  1117. }
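/* Rollover heuristic: sample the last ROLLOVER_HLEN packet hashes and
 * call a flow "huge" when it makes up more than half of that history.
 * Under ROOM_LOW only such flows are migrated to another socket, which
 * limits reordering for the remaining, smaller flows.
 */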
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

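/* Demultiplexers for the fanout modes: each maps an skb to a member
 * index below num. HASH uses the symmetric flow hash, LB a round-robin
 * counter, CPU the processor id, RND a random pick, QM the recorded
 * queue mapping, and CBPF/EBPF the return value of an attached BPF
 * program.
 */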
static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

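/* Entry point for packets delivered to a fanout group: optionally
 * defragment, pick a member socket via the group's demux mode, apply
 * rollover if requested, and hand the skb to that socket's prot_hook.
 */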
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

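/* Swap in a new classifier program under f->lock; the old program is
 * released only after synchronize_net(), so concurrent readers that
 * picked it up under rcu_read_lock() stay safe.
 */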
static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}

	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}

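/* Join (or create) the fanout group <id> for this socket. Note the
 * intentional fall-through from PACKET_FANOUT_ROLLOVER into the other
 * supported types below. A newly created group inherits the caller's
 * prot_hook type and device; joining fails unless the socket is
 * running with a matching hook, or when the group is already full
 * (PACKET_FANOUT_MAX).
 */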
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net()).
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 * Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 * Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

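/* Run the socket's attached classic/extended BPF filter, if any, and
 * return the number of bytes to keep (0 means drop). res is returned
 * unchanged when no filter is attached.
 */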
static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}

static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}

/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	bool is_drop_n_account = false;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	is_drop_n_account = true;
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timespec ts;
	__u32 ts_status;
	bool is_drop_n_account = false;
	bool do_vnet = false;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until the current aligned size without
	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	else if (skb->pkt_type != PACKET_OUTGOING &&
		 (skb->ip_summed == CHECKSUM_COMPLETE ||
		  skb_csum_unnecessary(skb)))
		status |= TP_STATUS_CSUM_VALID;

	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
				       po->tp_reserve;
		if (po->has_vnet_hdr) {
			netoff += sizeof(struct virtio_net_hdr);
			do_vnet = true;
		}
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0) {
				snaplen = 0;
				do_vnet = false;
			}
		}
	} else if (unlikely(macoff + snaplen >
			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
		u32 nval;

		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
			    snaplen, nval, macoff);
		snaplen = nval;
		if (unlikely((int)snaplen < 0)) {
			snaplen = 0;
			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
			do_vnet = false;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff + snaplen));
	if (!h.raw)
		goto drop_n_account;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * Anyway, moving it for V1/V2 only, as V3 doesn't need this
		 * at the packet level.
		 */
		if (po->stats.stats1.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	if (do_vnet) {
		if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
					    sizeof(struct virtio_net_hdr),
					    vio_le(), true)) {
			spin_lock(&sk->sk_receive_queue.lock);
			goto drop_n_account;
		}
	}

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		getnstimeofday(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (skb_vlan_tag_present(skb)) {
			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_next_offset and the vlan fields are already populated
		 * above, so DON'T clear those fields here.
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2) {
		__packet_set_status(po, h.raw, status);
		sk->sk_data_ready(sk);
	} else {
		prb_clear_blk_fill_status(&po->rx_ring);
	}

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;

drop_n_account:
	is_drop_n_account = true;
	po->stats.stats1.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}

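/* TX completion for a ring frame: drop the pending count, store the
 * transmit timestamp if one was requested, and hand the slot back to
 * user space as TP_STATUS_AVAILABLE.
 */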
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		void *ph;
		__u32 ts;

		ph = skb_shinfo(skb)->destructor_arg;
		packet_dec_pending(&po->tx_ring);

		ts = __packet_set_timestamp(po, ph, skb);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
	}

	sock_wfree(skb);
}

static void tpacket_set_protocol(const struct net_device *dev,
				 struct sk_buff *skb)
{
	if (dev->type == ARPHRD_ETHER) {
		skb_reset_mac_header(skb);
		skb->protocol = eth_hdr(skb)->h_proto;
	}
}

static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);

	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
		return -EINVAL;

	return 0;
}

static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr)
{
	if (*len < sizeof(*vnet_hdr))
		return -EINVAL;
	*len -= sizeof(*vnet_hdr);

	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
		return -EFAULT;

	return __packet_snd_vnet_parse(vnet_hdr, *len);
}

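/* Build an skb straight out of a TX ring frame: the link-layer header
 * (or the first copylen bytes) is copied into the linear area, the
 * rest of the frame is attached page by page as zero-copy fragments.
 */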
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, void *data, int tp_len,
		__be16 proto, unsigned char *addr, int hlen, int copylen,
		const struct sockcm_cookie *sockc)
{
	union tpacket_uhdr ph;
	int to_write, offset, len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	skb_shinfo(skb)->destructor_arg = ph.raw;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				      NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (copylen) {
		int hdrlen = min_t(int, copylen, tp_len);

		skb_push(skb, dev->hard_header_len);
		skb_put(skb, copylen - dev->hard_header_len);
		err = skb_store_bits(skb, 0, data, hdrlen);
		if (unlikely(err))
			return err;
		if (!dev_validate_header(dev, skb->data, hdrlen))
			return -EINVAL;
		if (!skb->protocol)
			tpacket_set_protocol(dev, skb);

		data += hdrlen;
		to_write -= hdrlen;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	refcount_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	skb_probe_transport_header(skb, 0);

	return tp_len;
}

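/* Validate a TX ring frame and locate the packet data inside it,
 * honouring a user-supplied data offset when PACKET_TX_HAS_OFF is set.
 * Returns the packet length or a negative error.
 */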
static int tpacket_parse_header(struct packet_sock *po, void *frame,
				int size_max, void **data)
{
	union tpacket_uhdr ph;
	int tp_len, off;

	ph.raw = frame;

	switch (po->tp_version) {
	case TPACKET_V3:
		if (ph.h3->tp_next_offset != 0) {
			pr_warn_once("variable sized slot not supported");
			return -EINVAL;
		}
		tp_len = ph.h3->tp_len;
		break;
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too big (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	if (unlikely(po->tp_tx_has_off)) {
		int off_min, off_max;

		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
		off_max = po->tx_ring.frame_size - tp_len;
		if (po->sk.sk_type == SOCK_DGRAM) {
			switch (po->tp_version) {
			case TPACKET_V3:
				off = ph.h3->tp_net;
				break;
			case TPACKET_V2:
				off = ph.h2->tp_net;
				break;
			default:
				off = ph.h1->tp_net;
				break;
			}
		} else {
			switch (po->tp_version) {
			case TPACKET_V3:
				off = ph.h3->tp_mac;
				break;
			case TPACKET_V2:
				off = ph.h2->tp_mac;
				break;
			default:
				off = ph.h1->tp_mac;
				break;
			}
		}
		if (unlikely((off < off_min) || (off_max < off)))
			return -EINVAL;
	} else {
		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
	}

	*data = frame + off;
	return tp_len;
}

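/* Transmit loop for the TX ring: walk frames marked
 * TP_STATUS_SEND_REQUEST, build and send an skb for each, and (unless
 * MSG_DONTWAIT) keep going until no frames remain and all pending
 * completions have drained.
 */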
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct virtio_net_hdr *vnet_hdr = NULL;
	struct sockcm_cookie sockc;
	__be16 proto;
	int err, reserve = 0;
	void *ph;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
	int tp_len, size_max;
	unsigned char *addr;
	void *data;
	int len_sum = 0;
	int status = TP_STATUS_AVAILABLE;
	int hlen, tlen, copylen = 0;

	mutex_lock(&po->pg_vec_lock);

	if (likely(saddr == NULL)) {
		dev = packet_cached_dev_get(po);
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						   sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	sockc.tsflags = po->sk.sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(&po->sk, msg, &sockc);
		if (unlikely(err))
			goto out_put;
	}

	if (po->sk.sk_socket->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	size_max = po->tx_ring.frame_size
		   - (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
		size_max = dev->mtu + reserve + VLAN_HLEN;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);
		if (unlikely(ph == NULL)) {
			if (need_wait && need_resched())
				schedule();
			continue;
		}

		skb = NULL;
		tp_len = tpacket_parse_header(po, ph, size_max, &data);
		if (tp_len < 0)
			goto tpacket_error;

		status = TP_STATUS_SEND_REQUEST;
		hlen = LL_RESERVED_SPACE(dev);
		tlen = dev->needed_tailroom;
		if (po->has_vnet_hdr) {
			vnet_hdr = data;
			data += sizeof(*vnet_hdr);
			tp_len -= sizeof(*vnet_hdr);
			if (tp_len < 0 ||
			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
				tp_len = -EINVAL;
				goto tpacket_error;
			}
			copylen = __virtio16_to_cpu(vio_le(),
						    vnet_hdr->hdr_len);
		}
		copylen = max_t(int, copylen, dev->hard_header_len);
		skb = sock_alloc_send_skb(&po->sk,
				hlen + tlen + sizeof(struct sockaddr_ll) +
				(copylen - dev->hard_header_len),
				!need_wait, &err);

		if (unlikely(skb == NULL)) {
			/* we assume the socket was initially writeable ... */
			if (likely(len_sum > 0))
				err = len_sum;
			goto out_status;
		}
		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
					  addr, hlen, copylen, &sockc);
		if (likely(tp_len >= 0) &&
		    tp_len > dev->mtu + reserve &&
		    !po->has_vnet_hdr &&
		    !packet_extra_vlan_len_allowed(dev, skb))
			tp_len = -EMSGSIZE;

		if (unlikely(tp_len < 0)) {
tpacket_error:
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
							      vio_le())) {
			tp_len = -EINVAL;
			goto tpacket_error;
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		packet_inc_pending(&po->tx_ring);

		status = TP_STATUS_SEND_REQUEST;
		err = po->xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
		/* Note: packet_read_pending() might be slow if we have
		 * to call it as it's per_cpu variable, but in fast-path
		 * we already short-circuit the loop with the first
		 * condition, and luckily don't have to go that path
		 * anyway.
		 */
		 (need_wait && packet_read_pending(&po->tx_ring))));

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

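/* Allocate an skb with a linear part of at least <linear> bytes and
 * the remainder as paged data; small packets are kept fully linear.
 */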
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
					int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int err, reserve = 0;
	struct sockcm_cookie sockc;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	struct packet_sock *po = pkt_sk(sk);
	bool has_vnet_hdr = false;
	int hlen, tlen, linear;
	int extra_len = 0;

	/*
	 * Get and verify the address.
	 */

	if (likely(saddr == NULL)) {
		dev = packet_cached_dev_get(po);
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out_unlock;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_unlock;

	sockc.tsflags = sk->sk_tsflags;
	sockc.mark = sk->sk_mark;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	if (po->has_vnet_hdr) {
		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
		if (err)
			goto out_unlock;
		has_vnet_hdr = true;
	}

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (!vnet_hdr.gso_type &&
	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
		goto out_unlock;

	err = -ENOBUFS;
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
	linear = max(linear, min_t(int, len, dev->hard_header_len));
	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM) {
		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (unlikely(offset < 0))
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
	if (err)
		goto out_free;

	if (sock->type == SOCK_RAW &&
	    !dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_free;
	}

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sockc.mark;

	if (has_vnet_hdr) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
		if (err)
			goto out_free;
		len += sizeof(vnet_hdr);
	}

	skb_probe_transport_header(skb, reserve);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	err = po->xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}

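/* Sendmsg dispatch: sockets with a mapped TX ring transmit via
 * tpacket_snd(), everything else takes the regular packet_snd() path.
 */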
static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}

/*
 * Close a PACKET socket. This is fairly simple. We immediately go
 * to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct packet_fanout *f;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	preempt_enable();

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	packet_cached_dev_reset(po);

	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	lock_sock(sk);
	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}
	release_sock(sk);

	f = fanout_release(sk);

	synchronize_net();

	if (f) {
		kfree(po->rollover);
		fanout_release_data(f);
		kfree(f);
	}

	/*
	 * Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	packet_free_pending(po);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 * Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
			  __be16 proto)
{
	struct packet_sock *po = pkt_sk(sk);
	struct net_device *dev_curr;
	__be16 proto_curr;
	bool need_rehook;
	struct net_device *dev = NULL;
	int ret = 0;
	bool unlisted = false;

	lock_sock(sk);
	spin_lock(&po->bind_lock);
	rcu_read_lock();

	if (po->fanout) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (name) {
		dev = dev_get_by_name_rcu(sock_net(sk), name);
		if (!dev) {
			ret = -ENODEV;
			goto out_unlock;
		}
	} else if (ifindex) {
		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out_unlock;
		}
	}

	if (dev)
		dev_hold(dev);

	proto_curr = po->prot_hook.type;
	dev_curr = po->prot_hook.dev;

	need_rehook = proto_curr != proto || dev_curr != dev;

	if (need_rehook) {
		if (po->running) {
			rcu_read_unlock();
			/* prevents packet_notifier() from calling
			 * register_prot_hook()
			 */
			po->num = 0;
			__unregister_prot_hook(sk, true);
			rcu_read_lock();
			dev_curr = po->prot_hook.dev;
			if (dev)
				unlisted = !dev_get_by_index_rcu(sock_net(sk),
								 dev->ifindex);
		}

		BUG_ON(po->running);
		po->num = proto;
		po->prot_hook.type = proto;

		if (unlikely(unlisted)) {
			dev_put(dev);
			po->prot_hook.dev = NULL;
			po->ifindex = -1;
			packet_cached_dev_reset(po);
		} else {
			po->prot_hook.dev = dev;
			po->ifindex = dev ? dev->ifindex : 0;
			packet_cached_dev_assign(po, dev);
		}
	}
	if (dev_curr)
		dev_put(dev_curr);

	if (proto == 0 || !need_rehook)
		goto out_unlock;

	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	rcu_read_unlock();
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return ret;
}

/*
 * Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[sizeof(uaddr->sa_data) + 1];

	/*
	 * Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	/* uaddr->sa_data comes from userspace and is not guaranteed to be
	 * zero-terminated.
	 */
	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
	name[sizeof(uaddr->sa_data)] = 0;

	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;

	/*
	 * Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	return packet_do_bind(sk, NULL, sll->sll_ifindex,
			      sll->sll_protocol ? : pkt_sk(sk)->num);
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 * Create a packet socket (SOCK_PACKET, SOCK_RAW or SOCK_DGRAM).
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;
	po->xmit = dev_queue_xmit;

	err = packet_alloc_pending(po);
	if (err)
		goto out2;

	packet_cached_dev_reset(po);

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 * Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->rollover = NULL;
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	mutex_lock(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, &packet_proto, 1);
	preempt_enable();

	return 0;
out2:
	sk_free(sk);
out:
	return err;
}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	int vnet_hdr_len = 0;
	unsigned int origlen = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = sock_recv_errqueue(sk, msg, len,
					 SOL_PACKET, PACKET_TX_TIMESTAMP);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't need to see or worry about
	 *	blocking retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->pressure)
		packet_rcv_has_room(pkt_sk(sk), NULL);

	if (pkt_sk(sk)->has_vnet_hdr) {
		err = packet_rcv_vnet(msg, skb, &len);
		if (err)
			goto out_free;
		vnet_hdr_len = sizeof(struct virtio_net_hdr);
	}

	/* You lose any data beyond the buffer you gave. If it worries
	 * a user program they can ask the device for its MTU
	 * anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free;

	if (sock->type != SOCK_PACKET) {
		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

		/* Original length was stored in sockaddr_ll fields */
		origlen = PACKET_SKB_CB(skb)->sa.origlen;
		sll->sll_family = AF_PACKET;
		sll->sll_protocol = skb->protocol;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		/* If the address length field is there to be filled
		 * in, we fill it in now.
		 */
		if (sock->type == SOCK_PACKET) {
			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
			msg->msg_namelen = sizeof(struct sockaddr_pkt);
		} else {
			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

			msg->msg_namelen = sll->sll_halen +
				offsetof(struct sockaddr_ll, sll_addr);
		}
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);
	}

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		else if (skb->pkt_type != PACKET_OUTGOING &&
			 (skb->ip_summed == CHECKSUM_COMPLETE ||
			  skb_csum_unnecessary(skb)))
			aux.tp_status |= TP_STATUS_CSUM_VALID;

		aux.tp_len = origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (skb_vlan_tag_present(skb)) {
			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			aux.tp_vlan_tci = 0;
			aux.tp_vlan_tpid = 0;
		}
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}

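/*
 * Illustrative userspace sketch (not part of this file): receiving with
 * PACKET_AUXDATA enabled and walking the control messages, matching the
 * put_cmsg() above. The descriptor "fd" and buffer sizes are assumptions;
 * aux->tp_len is the original wire length, aux->tp_status carries the
 * checksum and VLAN flags set above.
 *
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	if (recvmsg(fd, &msg, 0) == -1)
 *		perror("recvmsg");
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			printf("wire len %u\n", aux->tp_len);
 *		}
 */
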
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
	rcu_read_unlock();

	return sizeof(*uaddr);
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();

	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
}

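/*
 * Illustrative userspace sketch (not part of this file): getsockname()
 * lands in packet_getname() and returns a sockaddr_ll whose length is
 * offsetof(struct sockaddr_ll, sll_addr) + sll_halen, as computed above.
 * The descriptor "fd" is an assumption:
 *
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *
 *	if (getsockname(fd, (struct sockaddr *)&sll, &alen) == 0)
 *		printf("ifindex %d halen %u\n", sll.sll_ifindex, sll.sll_halen);
 */
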
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist_delete(struct net_device *dev,
				     struct packet_mclist **mlp)
{
	struct packet_mclist *ml;

	while ((ml = *mlp) != NULL) {
		if (ml->ifindex == dev->ifindex) {
			packet_dev_mc(dev, ml, -1);
			*mlp = ml->next;
			kfree(ml);
		} else
			mlp = &ml->next;
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

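/*
 * Illustrative userspace sketch (not part of this file): the
 * PACKET_ADD_MEMBERSHIP option feeds packet_mc_add(). PACKET_MR_PROMISC
 * needs no hardware address, so mr_alen stays zero; the interface name
 * and descriptor "fd" are assumptions:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		       &mreq, sizeof(mreq)) == -1)
 *		perror("PACKET_ADD_MEMBERSHIP");
 */
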
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;

				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			break;
		}
	}
	rtnl_unlock();
	return 0;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;

		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		lock_sock(sk);
		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len) {
			ret = -EINVAL;
		} else {
			if (copy_from_user(&req_u.req, optval, len))
				ret = -EFAULT;
			else
				ret = packet_set_ring(sk, &req_u, 0,
						      optname == PACKET_TX_RING);
		}
		release_sock(sk);
		return ret;
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
		case TPACKET_V3:
			break;
		default:
			return -EINVAL;
		}
		lock_sock(sk);
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
			ret = -EBUSY;
		} else {
			po->tp_version = val;
			ret = 0;
		}
		release_sock(sk);
		return ret;
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		if (val > INT_MAX)
			return -EINVAL;
		lock_sock(sk);
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
			ret = -EBUSY;
		} else {
			po->tp_reserve = val;
			ret = 0;
		}
		release_sock(sk);
		return ret;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	case PACKET_FANOUT_DATA:
	{
		if (!po->fanout)
			return -EINVAL;

		return fanout_set_data(po, optval, optlen);
	}
	case PACKET_TX_HAS_OFF:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_tx_has_off = !!val;
		return 0;
	}
	case PACKET_QDISC_BYPASS:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}

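/*
 * Illustrative userspace sketch (not part of this file): the ring setup
 * order enforced above is PACKET_VERSION first (it returns EBUSY once a
 * ring exists), then PACKET_RX_RING. The geometry below is an arbitrary
 * assumption (64 blocks of one page, two frames per block, so 128 frames
 * total); block size must be page-aligned and frames must tile the
 * blocks exactly, as packet_set_ring() verifies:
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */
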
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	union tpacket_stats_u st;
	struct tpacket_rollover_stats rstats;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memcpy(&st, &po->stats, sizeof(st));
		memset(&po->stats, 0, sizeof(po->stats));
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			st.stats3.tp_packets += st.stats3.tp_drops;
			data = &st.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st.stats1.tp_packets += st.stats1.tp_drops;
			data = &st.stats1;
		}

		break;
	case PACKET_AUXDATA:
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (len < sizeof(int))
			return -EINVAL;
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		val = po->tp_loss;
		break;
	case PACKET_TIMESTAMP:
		val = po->tp_tstamp;
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16) |
			((u32)po->fanout->flags << 24)) :
		       0);
		break;
	case PACKET_ROLLOVER_STATS:
		if (!po->rollover)
			return -EINVAL;
		rstats.tp_all = atomic_long_read(&po->rollover->num);
		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
		data = &rstats;
		lv = sizeof(rstats);
		break;
	case PACKET_TX_HAS_OFF:
		val = po->tp_tx_has_off;
		break;
	case PACKET_QDISC_BYPASS:
		val = packet_use_direct_xmit(po);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}

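/*
 * Illustrative userspace sketch (not part of this file): reading
 * PACKET_STATISTICS. Note that, as coded above, the counters are zeroed
 * on every read and tp_packets already includes tp_drops. "fd" is an
 * assumption, and this form applies to non-TPACKET_V3 sockets:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("%u packets, %u drops\n", st.tp_packets, st.tp_drops);
 */
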
#ifdef CONFIG_COMPAT
static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
				    char __user *optval, unsigned int optlen)
{
	struct packet_sock *po = pkt_sk(sock->sk);

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (optname == PACKET_FANOUT_DATA &&
	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
		optval = (char __user *)get_compat_bpf_fprog(optval);
		if (!optval)
			return -EFAULT;
		optlen = sizeof(struct sock_fprog);
	}

	return packet_setsockopt(sock, level, optname, optval, optlen);
}
#endif

static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}

static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

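/*
 * Illustrative userspace sketch (not part of this file): SIOCINQ on a
 * packet socket reports the length of the first queued packet, not the
 * total queue size, per the skb_peek() above. "fd" is an assumption:
 *
 *	int next_len = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &next_len) == 0)
 *		printf("next packet: %d bytes\n", next_len);
 */
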
static __poll_t packet_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	__poll_t mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= EPOLLIN | EPOLLRDNORM;
	}
	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		po->pressure = 0;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}

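/*
 * Illustrative userspace sketch (not part of this file): with an rx ring
 * mapped, poll() reports readable once a frame has left TP_STATUS_KERNEL,
 * so a reader can consume frames without a syscall per packet. "ring",
 * "idx" and "frame_size" are assumptions carried over from the earlier
 * setup sketch, and this uses the TPACKET_V2 header:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = (void *)(ring + idx * frame_size);
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	... consume the frame, then hand it back to the kernel ...
 *	hdr->tp_status = TP_STATUS_KERNEL;
 */
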
/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open = packet_mm_open,
	.close = packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <=
			  BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out;
				}
			}
			break;
		default:
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

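/*
 * Illustrative userspace sketch (not part of this file): the single
 * mmap() must cover all configured rings exactly, matching the
 * expected_size check above (rx first, then tx). Assuming only the rx
 * ring from the earlier sketch is configured, the length is exactly
 * tp_block_size * tp_block_nr:
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	if (ring == MAP_FAILED)
 *		perror("mmap");
 */
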
static const struct proto_ops packet_ops_spkt = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind_spkt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname_spkt,
	.poll = datagram_poll,
	.ioctl = packet_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = packet_sendmsg_spkt,
	.recvmsg = packet_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname,
	.poll = packet_poll,
	.ioctl = packet_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = packet_setsockopt,
	.getsockopt = packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg = packet_sendmsg,
	.recvmsg = packet_recvmsg,
	.mmap = packet_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family = PF_PACKET,
	.create = packet_create,
	.owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start = packet_seq_start,
	.next = packet_seq_next,
	.stop = packet_seq_stop,
	.show = packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.open = packet_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);