af_packet.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>

#include "internal.h"

/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnel); others are silly (PPP).
   - A packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It very likely points to the ll header.
		 PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!
   data       -> data

Summary:
   If dev->hard_header == NULL, we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (the ll header is added by the device; we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
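
/*
 * The distinction above is why SOCK_RAW callers must build the link-layer
 * header themselves while SOCK_DGRAM callers do not. A minimal user-space
 * sketch (illustrative only, not part of this file; error handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	// SOCK_DGRAM: the kernel adds/removes the ll header around payloads.
 *	int fd_dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	// SOCK_RAW: frames are passed with the ll header included.
 *	int fd_raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */
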
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
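
/*
 * packet_direct_xmit() is only reached when the socket has opted out of the
 * qdisc layer. A hedged user-space sketch of how that mode is enabled
 * (illustrative only; error handling omitted):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * After this, po->xmit points at packet_direct_xmit() instead of
 * dev_queue_xmit(), so frames skip traffic shaping entirely.
 */
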
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
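
/*
 * These two helpers implement the kernel half of the status-word handoff:
 * the kernel writes TP_STATUS_USER when a frame is ready, and user space
 * writes TP_STATUS_KERNEL to return the slot. A rough user-space consumer
 * loop for a V2 RX ring (sketch only; 'ring', 'frame_size', 'pfd' and
 * process_frame() are assumed to come from the caller's PACKET_RX_RING
 * setup):
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + i * frame_size);
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);          // wait for the kernel to fill it
 *	process_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	hdr->tp_status = TP_STATUS_KERNEL;  // hand the slot back
 */
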
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;

	return tmo;
}
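
/*
 * Worked example for the math above (assuming a 1 MiB block on a 1 Gbps
 * link): mbits = (1048576 * 8) / (1024 * 1024) = 8, div = 1000 / 1000 = 1,
 * so tmo = 8 * 1 + 1 = 9, i.e. the block retires after ~9 ms if traffic
 * stops. On a 10 Gbps link div = 10, so the integer division 8 / 10 yields
 * 0 and the +1 floors the timeout at 1 ms.
 */
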
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}
#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available; user space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}
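
/*
 * The freeze/thaw dance above is driven entirely by how quickly user space
 * returns blocks. A rough sketch of the consuming side for a TPACKET_V3
 * ring (illustrative only; 'block' points into the mmap'd ring, and 'pfd'
 * and walk_packets() are hypothetical caller-supplied pieces):
 *
 *	struct tpacket_block_desc *desc = (struct tpacket_block_desc *)block;
 *	while (!(desc->hdr.bh1.block_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);              // kernel still owns the block
 *	walk_packets(desc);                     // consume every packet in it
 *	desc->hdr.bh1.block_status = TP_STATUS_KERNEL;  // release -> thaw
 */
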
  934. static void *packet_current_rx_frame(struct packet_sock *po,
  935. struct sk_buff *skb,
  936. int status, unsigned int len)
  937. {
  938. char *curr = NULL;
  939. switch (po->tp_version) {
  940. case TPACKET_V1:
  941. case TPACKET_V2:
  942. curr = packet_lookup_frame(po, &po->rx_ring,
  943. po->rx_ring.head, status);
  944. return curr;
  945. case TPACKET_V3:
  946. return __packet_lookup_frame_in_block(po, skb, status, len);
  947. default:
  948. WARN(1, "TPACKET version not supported\n");
  949. BUG();
  950. return NULL;
  951. }
  952. }
  953. static void *prb_lookup_block(struct packet_sock *po,
  954. struct packet_ring_buffer *rb,
  955. unsigned int idx,
  956. int status)
  957. {
  958. struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
  959. struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
  960. if (status != BLOCK_STATUS(pbd))
  961. return NULL;
  962. return pbd;
  963. }
  964. static int prb_previous_blk_num(struct packet_ring_buffer *rb)
  965. {
  966. unsigned int prev;
  967. if (rb->prb_bdqc.kactive_blk_num)
  968. prev = rb->prb_bdqc.kactive_blk_num-1;
  969. else
  970. prev = rb->prb_bdqc.knum_blocks-1;
  971. return prev;
  972. }
  973. /* Assumes caller has held the rx_queue.lock */
  974. static void *__prb_previous_block(struct packet_sock *po,
  975. struct packet_ring_buffer *rb,
  976. int status)
  977. {
  978. unsigned int previous = prb_previous_blk_num(rb);
  979. return prb_lookup_block(po, rb, previous, status);
  980. }
  981. static void *packet_previous_rx_frame(struct packet_sock *po,
  982. struct packet_ring_buffer *rb,
  983. int status)
  984. {
  985. if (po->tp_version <= TPACKET_V2)
  986. return packet_previous_frame(po, rb, status);
  987. return __prb_previous_block(po, rb, status);
  988. }
  989. static void packet_increment_rx_head(struct packet_sock *po,
  990. struct packet_ring_buffer *rb)
  991. {
  992. switch (po->tp_version) {
  993. case TPACKET_V1:
  994. case TPACKET_V2:
  995. return packet_increment_head(rb);
  996. case TPACKET_V3:
  997. default:
  998. WARN(1, "TPACKET version not supported.\n");
  999. BUG();
  1000. return;
  1001. }
  1002. }
  1003. static void *packet_previous_frame(struct packet_sock *po,
  1004. struct packet_ring_buffer *rb,
  1005. int status)
  1006. {
  1007. unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
  1008. return packet_lookup_frame(po, rb, previous, status);
  1009. }
  1010. static void packet_increment_head(struct packet_ring_buffer *buff)
  1011. {
  1012. buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
  1013. }
  1014. static void packet_inc_pending(struct packet_ring_buffer *rb)
  1015. {
  1016. this_cpu_inc(*rb->pending_refcnt);
  1017. }
  1018. static void packet_dec_pending(struct packet_ring_buffer *rb)
  1019. {
  1020. this_cpu_dec(*rb->pending_refcnt);
  1021. }
  1022. static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
  1023. {
  1024. unsigned int refcnt = 0;
  1025. int cpu;
  1026. /* We don't use pending refcount in rx_ring. */
  1027. if (rb->pending_refcnt == NULL)
  1028. return 0;
  1029. for_each_possible_cpu(cpu)
  1030. refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
  1031. return refcnt;
  1032. }
  1033. static int packet_alloc_pending(struct packet_sock *po)
  1034. {
  1035. po->rx_ring.pending_refcnt = NULL;
  1036. po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
  1037. if (unlikely(po->tx_ring.pending_refcnt == NULL))
  1038. return -ENOBUFS;
  1039. return 0;
  1040. }
  1041. static void packet_free_pending(struct packet_sock *po)
  1042. {
  1043. free_percpu(po->tx_ring.pending_refcnt);
  1044. }
  1045. #define ROOM_POW_OFF 2
  1046. #define ROOM_NONE 0x0
  1047. #define ROOM_LOW 0x1
  1048. #define ROOM_NORMAL 0x2
  1049. static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
  1050. {
  1051. int idx, len;
  1052. len = po->rx_ring.frame_max + 1;
  1053. idx = po->rx_ring.head;
  1054. if (pow_off)
  1055. idx += len >> pow_off;
  1056. if (idx >= len)
  1057. idx -= len;
  1058. return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
  1059. }
  1060. static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
  1061. {
  1062. int idx, len;
  1063. len = po->rx_ring.prb_bdqc.knum_blocks;
  1064. idx = po->rx_ring.prb_bdqc.kactive_blk_num;
  1065. if (pow_off)
  1066. idx += len >> pow_off;
  1067. if (idx >= len)
  1068. idx -= len;
  1069. return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
  1070. }
  1071. static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
  1072. {
  1073. struct sock *sk = &po->sk;
  1074. int ret = ROOM_NONE;
  1075. if (po->prot_hook.func != tpacket_rcv) {
  1076. int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
  1077. - (skb ? skb->truesize : 0);
  1078. if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
  1079. return ROOM_NORMAL;
  1080. else if (avail > 0)
  1081. return ROOM_LOW;
  1082. else
  1083. return ROOM_NONE;
  1084. }
  1085. if (po->tp_version == TPACKET_V3) {
  1086. if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
  1087. ret = ROOM_NORMAL;
  1088. else if (__tpacket_v3_has_room(po, 0))
  1089. ret = ROOM_LOW;
  1090. } else {
  1091. if (__tpacket_has_room(po, ROOM_POW_OFF))
  1092. ret = ROOM_NORMAL;
  1093. else if (__tpacket_has_room(po, 0))
  1094. ret = ROOM_LOW;
  1095. }
  1096. return ret;
  1097. }
  1098. static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
  1099. {
  1100. int ret;
  1101. bool has_room;
  1102. spin_lock_bh(&po->sk.sk_receive_queue.lock);
  1103. ret = __packet_rcv_has_room(po, skb);
  1104. has_room = ret == ROOM_NORMAL;
  1105. if (po->pressure == has_room)
  1106. po->pressure = !has_room;
  1107. spin_unlock_bh(&po->sk.sk_receive_queue.lock);
  1108. return ret;
  1109. }
  1110. static void packet_sock_destruct(struct sock *sk)
  1111. {
  1112. skb_queue_purge(&sk->sk_error_queue);
  1113. WARN_ON(atomic_read(&sk->sk_rmem_alloc));
  1114. WARN_ON(atomic_read(&sk->sk_wmem_alloc));
  1115. if (!sock_flag(sk, SOCK_DEAD)) {
  1116. pr_err("Attempt to release alive packet socket: %p\n", sk);
  1117. return;
  1118. }
  1119. sk_refcnt_debug_dec(sk);
  1120. }
  1121. static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
  1122. {
  1123. u32 rxhash;
  1124. int i, count = 0;
  1125. rxhash = skb_get_hash(skb);
  1126. for (i = 0; i < ROLLOVER_HLEN; i++)
  1127. if (po->rollover->history[i] == rxhash)
  1128. count++;
  1129. po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
  1130. return count > (ROLLOVER_HLEN >> 1);
  1131. }
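/*
 * A flow is "huge" if its rxhash already fills more than half of the
 * recent-flow history; under ROOM_LOW such flows are rolled over to another
 * member socket, so one heavy flow does not starve the rest of the group.
 */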
  1132. static unsigned int fanout_demux_hash(struct packet_fanout *f,
  1133. struct sk_buff *skb,
  1134. unsigned int num)
  1135. {
  1136. return reciprocal_scale(skb_get_hash(skb), num);
  1137. }
  1138. static unsigned int fanout_demux_lb(struct packet_fanout *f,
  1139. struct sk_buff *skb,
  1140. unsigned int num)
  1141. {
  1142. unsigned int val = atomic_inc_return(&f->rr_cur);
  1143. return val % num;
  1144. }
  1145. static unsigned int fanout_demux_cpu(struct packet_fanout *f,
  1146. struct sk_buff *skb,
  1147. unsigned int num)
  1148. {
  1149. return smp_processor_id() % num;
  1150. }
  1151. static unsigned int fanout_demux_rnd(struct packet_fanout *f,
  1152. struct sk_buff *skb,
  1153. unsigned int num)
  1154. {
  1155. return prandom_u32_max(num);
  1156. }
  1157. static unsigned int fanout_demux_rollover(struct packet_fanout *f,
  1158. struct sk_buff *skb,
  1159. unsigned int idx, bool try_self,
  1160. unsigned int num)
  1161. {
  1162. struct packet_sock *po, *po_next, *po_skip = NULL;
  1163. unsigned int i, j, room = ROOM_NONE;
  1164. po = pkt_sk(f->arr[idx]);
  1165. if (try_self) {
  1166. room = packet_rcv_has_room(po, skb);
  1167. if (room == ROOM_NORMAL ||
  1168. (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
  1169. return idx;
  1170. po_skip = po;
  1171. }
  1172. i = j = min_t(int, po->rollover->sock, num - 1);
  1173. do {
  1174. po_next = pkt_sk(f->arr[i]);
  1175. if (po_next != po_skip && !po_next->pressure &&
  1176. packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
  1177. if (i != j)
  1178. po->rollover->sock = i;
  1179. atomic_long_inc(&po->rollover->num);
  1180. if (room == ROOM_LOW)
  1181. atomic_long_inc(&po->rollover->num_huge);
  1182. return i;
  1183. }
  1184. if (++i == num)
  1185. i = 0;
  1186. } while (i != j);
  1187. atomic_long_inc(&po->rollover->num_failed);
  1188. return idx;
  1189. }
  1190. static unsigned int fanout_demux_qm(struct packet_fanout *f,
  1191. struct sk_buff *skb,
  1192. unsigned int num)
  1193. {
  1194. return skb_get_queue_mapping(skb) % num;
  1195. }
  1196. static unsigned int fanout_demux_bpf(struct packet_fanout *f,
  1197. struct sk_buff *skb,
  1198. unsigned int num)
  1199. {
  1200. struct bpf_prog *prog;
  1201. unsigned int ret = 0;
  1202. rcu_read_lock();
  1203. prog = rcu_dereference(f->bpf_prog);
  1204. if (prog)
  1205. ret = bpf_prog_run_clear_cb(prog, skb) % num;
  1206. rcu_read_unlock();
  1207. return ret;
  1208. }
  1209. static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
  1210. {
  1211. return f->flags & (flag >> 8);
  1212. }
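/*
 * The PACKET_FANOUT_FLAG_* constants occupy the high byte of the 16-bit
 * type_flags word from userspace; fanout_add() stores only that high byte
 * in f->flags, so the constant is shifted down by 8 before the test above.
 */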
  1213. static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
  1214. struct packet_type *pt, struct net_device *orig_dev)
  1215. {
  1216. struct packet_fanout *f = pt->af_packet_priv;
  1217. unsigned int num = READ_ONCE(f->num_members);
  1218. struct net *net = read_pnet(&f->net);
  1219. struct packet_sock *po;
  1220. unsigned int idx;
  1221. if (!net_eq(dev_net(dev), net) || !num) {
  1222. kfree_skb(skb);
  1223. return 0;
  1224. }
  1225. if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
  1226. skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
  1227. if (!skb)
  1228. return 0;
  1229. }
  1230. switch (f->type) {
  1231. case PACKET_FANOUT_HASH:
  1232. default:
  1233. idx = fanout_demux_hash(f, skb, num);
  1234. break;
  1235. case PACKET_FANOUT_LB:
  1236. idx = fanout_demux_lb(f, skb, num);
  1237. break;
  1238. case PACKET_FANOUT_CPU:
  1239. idx = fanout_demux_cpu(f, skb, num);
  1240. break;
  1241. case PACKET_FANOUT_RND:
  1242. idx = fanout_demux_rnd(f, skb, num);
  1243. break;
  1244. case PACKET_FANOUT_QM:
  1245. idx = fanout_demux_qm(f, skb, num);
  1246. break;
  1247. case PACKET_FANOUT_ROLLOVER:
  1248. idx = fanout_demux_rollover(f, skb, 0, false, num);
  1249. break;
  1250. case PACKET_FANOUT_CBPF:
  1251. case PACKET_FANOUT_EBPF:
  1252. idx = fanout_demux_bpf(f, skb, num);
  1253. break;
  1254. }
  1255. if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
  1256. idx = fanout_demux_rollover(f, skb, idx, true, num);
  1257. po = pkt_sk(f->arr[idx]);
  1258. return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
  1259. }
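/*
 * Userspace joins a fanout group with setsockopt(SOL_PACKET, PACKET_FANOUT):
 * the group id goes in the low 16 bits of the argument and the type/flags
 * in the high 16 bits. A minimal sketch follows (the group id 42 is
 * illustrative and error handling is omitted); the block is guarded out and
 * never compiled.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static void fanout_join_example(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	/* id 42, hash demux; all sockets using the same id in this netns
	 * form one group, and packet_rcv_fanout() picks one member per
	 * packet.
	 */
	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);

	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
		   &fanout_arg, sizeof(fanout_arg));
}
#endif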
  1260. DEFINE_MUTEX(fanout_mutex);
  1261. EXPORT_SYMBOL_GPL(fanout_mutex);
  1262. static LIST_HEAD(fanout_list);
  1263. static void __fanout_link(struct sock *sk, struct packet_sock *po)
  1264. {
  1265. struct packet_fanout *f = po->fanout;
  1266. spin_lock(&f->lock);
  1267. f->arr[f->num_members] = sk;
  1268. smp_wmb();
  1269. f->num_members++;
  1270. spin_unlock(&f->lock);
  1271. }
  1272. static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
  1273. {
  1274. struct packet_fanout *f = po->fanout;
  1275. int i;
  1276. spin_lock(&f->lock);
  1277. for (i = 0; i < f->num_members; i++) {
  1278. if (f->arr[i] == sk)
  1279. break;
  1280. }
  1281. BUG_ON(i >= f->num_members);
  1282. f->arr[i] = f->arr[f->num_members - 1];
  1283. f->num_members--;
  1284. spin_unlock(&f->lock);
  1285. }
  1286. static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
  1287. {
  1288. if (sk->sk_family != PF_PACKET)
  1289. return false;
  1290. return ptype->af_packet_priv == pkt_sk(sk)->fanout;
  1291. }
  1292. static void fanout_init_data(struct packet_fanout *f)
  1293. {
  1294. switch (f->type) {
  1295. case PACKET_FANOUT_LB:
  1296. atomic_set(&f->rr_cur, 0);
  1297. break;
  1298. case PACKET_FANOUT_CBPF:
  1299. case PACKET_FANOUT_EBPF:
  1300. RCU_INIT_POINTER(f->bpf_prog, NULL);
  1301. break;
  1302. }
  1303. }
  1304. static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
  1305. {
  1306. struct bpf_prog *old;
  1307. spin_lock(&f->lock);
  1308. old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
  1309. rcu_assign_pointer(f->bpf_prog, new);
  1310. spin_unlock(&f->lock);
  1311. if (old) {
  1312. synchronize_net();
  1313. bpf_prog_destroy(old);
  1314. }
  1315. }
  1316. static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
  1317. unsigned int len)
  1318. {
  1319. struct bpf_prog *new;
  1320. struct sock_fprog fprog;
  1321. int ret;
  1322. if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
  1323. return -EPERM;
  1324. if (len != sizeof(fprog))
  1325. return -EINVAL;
  1326. if (copy_from_user(&fprog, data, len))
  1327. return -EFAULT;
  1328. ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
  1329. if (ret)
  1330. return ret;
  1331. __fanout_set_data_bpf(po->fanout, new);
  1332. return 0;
  1333. }
  1334. static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
  1335. unsigned int len)
  1336. {
  1337. struct bpf_prog *new;
  1338. u32 fd;
  1339. if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
  1340. return -EPERM;
  1341. if (len != sizeof(fd))
  1342. return -EINVAL;
  1343. if (copy_from_user(&fd, data, len))
  1344. return -EFAULT;
  1345. new = bpf_prog_get(fd);
  1346. if (IS_ERR(new))
  1347. return PTR_ERR(new);
  1348. if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
  1349. bpf_prog_put(new);
  1350. return -EINVAL;
  1351. }
  1352. __fanout_set_data_bpf(po->fanout, new);
  1353. return 0;
  1354. }
  1355. static int fanout_set_data(struct packet_sock *po, char __user *data,
  1356. unsigned int len)
  1357. {
  1358. switch (po->fanout->type) {
  1359. case PACKET_FANOUT_CBPF:
  1360. return fanout_set_data_cbpf(po, data, len);
  1361. case PACKET_FANOUT_EBPF:
  1362. return fanout_set_data_ebpf(po, data, len);
  1363. default:
  1364. return -EINVAL;
1365. }
  1366. }
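/*
 * For PACKET_FANOUT_CBPF, userspace hands a classic BPF program to
 * fanout_set_data_cbpf() as a struct sock_fprog via
 * setsockopt(SOL_PACKET, PACKET_FANOUT_DATA); the program's return value,
 * taken modulo the member count, selects the receiving socket. A minimal
 * sketch (the one-instruction program, which steers everything to member 0,
 * is illustrative; fd is assumed to be a socket already in a CBPF group):
 */
#if 0
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_packet.h>

static void fanout_cbpf_example(int fd)
{
	/* one-instruction program: always return 0, i.e. member 0 */
	struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 0 },
	};
	struct sock_fprog fprog = { .len = 1, .filter = insns };

	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
}
#endif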
  1367. static void fanout_release_data(struct packet_fanout *f)
  1368. {
  1369. switch (f->type) {
  1370. case PACKET_FANOUT_CBPF:
  1371. case PACKET_FANOUT_EBPF:
  1372. __fanout_set_data_bpf(f, NULL);
1373. }
  1374. }
  1375. static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
  1376. {
  1377. struct packet_sock *po = pkt_sk(sk);
  1378. struct packet_fanout *f, *match;
  1379. u8 type = type_flags & 0xff;
  1380. u8 flags = type_flags >> 8;
  1381. int err;
  1382. switch (type) {
  1383. case PACKET_FANOUT_ROLLOVER:
  1384. if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
  1385. return -EINVAL;
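/* fall through */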
  1386. case PACKET_FANOUT_HASH:
  1387. case PACKET_FANOUT_LB:
  1388. case PACKET_FANOUT_CPU:
  1389. case PACKET_FANOUT_RND:
  1390. case PACKET_FANOUT_QM:
  1391. case PACKET_FANOUT_CBPF:
  1392. case PACKET_FANOUT_EBPF:
  1393. break;
  1394. default:
  1395. return -EINVAL;
  1396. }
  1397. if (!po->running)
  1398. return -EINVAL;
  1399. if (po->fanout)
  1400. return -EALREADY;
  1401. if (type == PACKET_FANOUT_ROLLOVER ||
  1402. (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
  1403. po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
  1404. if (!po->rollover)
  1405. return -ENOMEM;
  1406. atomic_long_set(&po->rollover->num, 0);
  1407. atomic_long_set(&po->rollover->num_huge, 0);
  1408. atomic_long_set(&po->rollover->num_failed, 0);
  1409. }
  1410. mutex_lock(&fanout_mutex);
  1411. match = NULL;
  1412. list_for_each_entry(f, &fanout_list, list) {
  1413. if (f->id == id &&
  1414. read_pnet(&f->net) == sock_net(sk)) {
  1415. match = f;
  1416. break;
  1417. }
  1418. }
  1419. err = -EINVAL;
  1420. if (match && match->flags != flags)
  1421. goto out;
  1422. if (!match) {
  1423. err = -ENOMEM;
  1424. match = kzalloc(sizeof(*match), GFP_KERNEL);
  1425. if (!match)
  1426. goto out;
  1427. write_pnet(&match->net, sock_net(sk));
  1428. match->id = id;
  1429. match->type = type;
  1430. match->flags = flags;
  1431. INIT_LIST_HEAD(&match->list);
  1432. spin_lock_init(&match->lock);
  1433. atomic_set(&match->sk_ref, 0);
  1434. fanout_init_data(match);
  1435. match->prot_hook.type = po->prot_hook.type;
  1436. match->prot_hook.dev = po->prot_hook.dev;
  1437. match->prot_hook.func = packet_rcv_fanout;
  1438. match->prot_hook.af_packet_priv = match;
  1439. match->prot_hook.id_match = match_fanout_group;
  1440. dev_add_pack(&match->prot_hook);
  1441. list_add(&match->list, &fanout_list);
  1442. }
  1443. err = -EINVAL;
  1444. if (match->type == type &&
  1445. match->prot_hook.type == po->prot_hook.type &&
  1446. match->prot_hook.dev == po->prot_hook.dev) {
  1447. err = -ENOSPC;
  1448. if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
  1449. __dev_remove_pack(&po->prot_hook);
  1450. po->fanout = match;
  1451. atomic_inc(&match->sk_ref);
  1452. __fanout_link(sk, po);
  1453. err = 0;
  1454. }
  1455. }
  1456. out:
  1457. mutex_unlock(&fanout_mutex);
  1458. if (err) {
  1459. kfree(po->rollover);
  1460. po->rollover = NULL;
  1461. }
  1462. return err;
  1463. }
  1464. static void fanout_release(struct sock *sk)
  1465. {
  1466. struct packet_sock *po = pkt_sk(sk);
  1467. struct packet_fanout *f;
  1468. f = po->fanout;
  1469. if (!f)
  1470. return;
  1471. mutex_lock(&fanout_mutex);
  1472. po->fanout = NULL;
  1473. if (atomic_dec_and_test(&f->sk_ref)) {
  1474. list_del(&f->list);
  1475. dev_remove_pack(&f->prot_hook);
  1476. fanout_release_data(f);
  1477. kfree(f);
  1478. }
  1479. mutex_unlock(&fanout_mutex);
  1480. if (po->rollover)
  1481. kfree_rcu(po->rollover, rcu);
  1482. }
  1483. static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
  1484. struct sk_buff *skb)
  1485. {
  1486. /* Earlier code assumed this would be a VLAN pkt, double-check
  1487. * this now that we have the actual packet in hand. We can only
  1488. * do this check on Ethernet devices.
  1489. */
  1490. if (unlikely(dev->type != ARPHRD_ETHER))
  1491. return false;
  1492. skb_reset_mac_header(skb);
  1493. return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
  1494. }
  1495. static const struct proto_ops packet_ops;
  1496. static const struct proto_ops packet_ops_spkt;
  1497. static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
  1498. struct packet_type *pt, struct net_device *orig_dev)
  1499. {
  1500. struct sock *sk;
  1501. struct sockaddr_pkt *spkt;
  1502. /*
  1503. * When we registered the protocol we saved the socket in the data
  1504. * field for just this event.
  1505. */
  1506. sk = pt->af_packet_priv;
  1507. /*
  1508. * Yank back the headers [hope the device set this
  1509. * right or kerboom...]
  1510. *
  1511. * Incoming packets have ll header pulled,
  1512. * push it back.
  1513. *
  1514. * For outgoing ones skb->data == skb_mac_header(skb)
  1515. * so that this procedure is noop.
  1516. */
  1517. if (skb->pkt_type == PACKET_LOOPBACK)
  1518. goto out;
  1519. if (!net_eq(dev_net(dev), sock_net(sk)))
  1520. goto out;
  1521. skb = skb_share_check(skb, GFP_ATOMIC);
  1522. if (skb == NULL)
  1523. goto oom;
  1524. /* drop any routing info */
  1525. skb_dst_drop(skb);
  1526. /* drop conntrack reference */
  1527. nf_reset(skb);
  1528. spkt = &PACKET_SKB_CB(skb)->sa.pkt;
  1529. skb_push(skb, skb->data - skb_mac_header(skb));
  1530. /*
  1531. * The SOCK_PACKET socket receives _all_ frames.
  1532. */
  1533. spkt->spkt_family = dev->type;
  1534. strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
  1535. spkt->spkt_protocol = skb->protocol;
  1536. /*
  1537. * Charge the memory to the socket. This is done specifically
  1538. * to prevent sockets using all the memory up.
  1539. */
  1540. if (sock_queue_rcv_skb(sk, skb) == 0)
  1541. return 0;
  1542. out:
  1543. kfree_skb(skb);
  1544. oom:
  1545. return 0;
  1546. }
  1547. /*
  1548. * Output a raw packet to a device layer. This bypasses all the other
  1549. * protocol layers and you must therefore supply it with a complete frame
  1550. */
  1551. static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
  1552. size_t len)
  1553. {
  1554. struct sock *sk = sock->sk;
  1555. DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
  1556. struct sk_buff *skb = NULL;
  1557. struct net_device *dev;
  1558. __be16 proto = 0;
  1559. int err;
  1560. int extra_len = 0;
  1561. /*
  1562. * Get and verify the address.
  1563. */
  1564. if (saddr) {
  1565. if (msg->msg_namelen < sizeof(struct sockaddr))
  1566. return -EINVAL;
  1567. if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
  1568. proto = saddr->spkt_protocol;
  1569. } else
  1570. return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
  1571. /*
  1572. * Find the device first to size check it
  1573. */
  1574. saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
  1575. retry:
  1576. rcu_read_lock();
  1577. dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
  1578. err = -ENODEV;
  1579. if (dev == NULL)
  1580. goto out_unlock;
  1581. err = -ENETDOWN;
  1582. if (!(dev->flags & IFF_UP))
  1583. goto out_unlock;
  1584. /*
  1585. * You may not queue a frame bigger than the mtu. This is the lowest level
  1586. * raw protocol and you must do your own fragmentation at this level.
  1587. */
  1588. if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
  1589. if (!netif_supports_nofcs(dev)) {
  1590. err = -EPROTONOSUPPORT;
  1591. goto out_unlock;
  1592. }
  1593. extra_len = 4; /* We're doing our own CRC */
  1594. }
  1595. err = -EMSGSIZE;
  1596. if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
  1597. goto out_unlock;
  1598. if (!skb) {
  1599. size_t reserved = LL_RESERVED_SPACE(dev);
  1600. int tlen = dev->needed_tailroom;
  1601. unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
  1602. rcu_read_unlock();
  1603. skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
  1604. if (skb == NULL)
  1605. return -ENOBUFS;
  1606. /* FIXME: Save some space for broken drivers that write a hard
  1607. * header at transmission time by themselves. PPP is the notable
  1608. * one here. This should really be fixed at the driver level.
  1609. */
  1610. skb_reserve(skb, reserved);
  1611. skb_reset_network_header(skb);
  1612. /* Try to align data part correctly */
  1613. if (hhlen) {
  1614. skb->data -= hhlen;
  1615. skb->tail -= hhlen;
  1616. if (len < hhlen)
  1617. skb_reset_network_header(skb);
  1618. }
  1619. err = memcpy_from_msg(skb_put(skb, len), msg, len);
  1620. if (err)
  1621. goto out_free;
  1622. goto retry;
  1623. }
  1624. if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
  1625. !packet_extra_vlan_len_allowed(dev, skb)) {
  1626. err = -EMSGSIZE;
  1627. goto out_unlock;
  1628. }
  1629. skb->protocol = proto;
  1630. skb->dev = dev;
  1631. skb->priority = sk->sk_priority;
  1632. skb->mark = sk->sk_mark;
  1633. sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
  1634. if (unlikely(extra_len == 4))
  1635. skb->no_fcs = 1;
  1636. skb_probe_transport_header(skb, 0);
  1637. dev_queue_xmit(skb);
  1638. rcu_read_unlock();
  1639. return len;
  1640. out_unlock:
  1641. rcu_read_unlock();
  1642. out_free:
  1643. kfree_skb(skb);
  1644. return err;
  1645. }
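/*
 * The legacy SOCK_PACKET path above takes the device name and protocol in a
 * struct sockaddr_pkt rather than a bound ifindex. A minimal userspace
 * sketch ("eth0" is illustrative; frame/frame_len are assumed to hold a
 * complete link-layer frame):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static void spkt_send_example(int fd, const void *frame, size_t frame_len)
{
	struct sockaddr_pkt spkt;

	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device) - 1);
	spkt.spkt_protocol = htons(ETH_P_IP);

	sendto(fd, frame, frame_len, 0,
	       (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif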
  1646. static unsigned int run_filter(struct sk_buff *skb,
  1647. const struct sock *sk,
  1648. unsigned int res)
  1649. {
  1650. struct sk_filter *filter;
  1651. rcu_read_lock();
  1652. filter = rcu_dereference(sk->sk_filter);
  1653. if (filter != NULL)
  1654. res = bpf_prog_run_clear_cb(filter->prog, skb);
  1655. rcu_read_unlock();
  1656. return res;
  1657. }
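/*
 * The filter consulted above is the one userspace attaches with
 * SO_ATTACH_FILTER; its return value caps how much of each packet is kept
 * (the snap length). A minimal sketch attaching an "accept all, snap to 96
 * bytes" classic BPF filter (the 96 is illustrative):
 */
#if 0
#include <sys/socket.h>
#include <linux/filter.h>

static void attach_filter_example(int fd)
{
	struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 96 },	/* accept, snap to 96 bytes */
	};
	struct sock_fprog fprog = { .len = 1, .filter = insns };

	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
}
#endif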
  1658. /*
1659. * This function does lazy skb cloning, in the hope that most packets
  1660. * are discarded by BPF.
  1661. *
  1662. * Note tricky part: we DO mangle shared skb! skb->data, skb->len
  1663. * and skb->cb are mangled. It works because (and until) packets
  1664. * falling here are owned by current CPU. Output packets are cloned
  1665. * by dev_queue_xmit_nit(), input packets are processed by net_bh
1666. * sequentially, so that if we return skb to original state on exit,
  1667. * we will not harm anyone.
  1668. */
  1669. static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
  1670. struct packet_type *pt, struct net_device *orig_dev)
  1671. {
  1672. struct sock *sk;
  1673. struct sockaddr_ll *sll;
  1674. struct packet_sock *po;
  1675. u8 *skb_head = skb->data;
  1676. int skb_len = skb->len;
  1677. unsigned int snaplen, res;
  1678. if (skb->pkt_type == PACKET_LOOPBACK)
  1679. goto drop;
  1680. sk = pt->af_packet_priv;
  1681. po = pkt_sk(sk);
  1682. if (!net_eq(dev_net(dev), sock_net(sk)))
  1683. goto drop;
  1684. skb->dev = dev;
  1685. if (dev->header_ops) {
  1686. /* The device has an explicit notion of ll header,
  1687. * exported to higher levels.
  1688. *
  1689. * Otherwise, the device hides details of its frame
1690. * structure, so that the corresponding packet head is
1691. * never delivered to the user.
  1692. */
  1693. if (sk->sk_type != SOCK_DGRAM)
  1694. skb_push(skb, skb->data - skb_mac_header(skb));
  1695. else if (skb->pkt_type == PACKET_OUTGOING) {
  1696. /* Special case: outgoing packets have ll header at head */
  1697. skb_pull(skb, skb_network_offset(skb));
  1698. }
  1699. }
  1700. snaplen = skb->len;
  1701. res = run_filter(skb, sk, snaplen);
  1702. if (!res)
  1703. goto drop_n_restore;
  1704. if (snaplen > res)
  1705. snaplen = res;
  1706. if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
  1707. goto drop_n_acct;
  1708. if (skb_shared(skb)) {
  1709. struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
  1710. if (nskb == NULL)
  1711. goto drop_n_acct;
  1712. if (skb_head != skb->data) {
  1713. skb->data = skb_head;
  1714. skb->len = skb_len;
  1715. }
  1716. consume_skb(skb);
  1717. skb = nskb;
  1718. }
  1719. sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
  1720. sll = &PACKET_SKB_CB(skb)->sa.ll;
  1721. sll->sll_hatype = dev->type;
  1722. sll->sll_pkttype = skb->pkt_type;
  1723. if (unlikely(po->origdev))
  1724. sll->sll_ifindex = orig_dev->ifindex;
  1725. else
  1726. sll->sll_ifindex = dev->ifindex;
  1727. sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
  1728. /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
  1729. * Use their space for storing the original skb length.
  1730. */
  1731. PACKET_SKB_CB(skb)->sa.origlen = skb->len;
  1732. if (pskb_trim(skb, snaplen))
  1733. goto drop_n_acct;
  1734. skb_set_owner_r(skb, sk);
  1735. skb->dev = NULL;
  1736. skb_dst_drop(skb);
  1737. /* drop conntrack reference */
  1738. nf_reset(skb);
  1739. spin_lock(&sk->sk_receive_queue.lock);
  1740. po->stats.stats1.tp_packets++;
  1741. sock_skb_set_dropcount(sk, skb);
  1742. __skb_queue_tail(&sk->sk_receive_queue, skb);
  1743. spin_unlock(&sk->sk_receive_queue.lock);
  1744. sk->sk_data_ready(sk);
  1745. return 0;
  1746. drop_n_acct:
  1747. spin_lock(&sk->sk_receive_queue.lock);
  1748. po->stats.stats1.tp_drops++;
  1749. atomic_inc(&sk->sk_drops);
  1750. spin_unlock(&sk->sk_receive_queue.lock);
  1751. drop_n_restore:
  1752. if (skb_head != skb->data && skb_shared(skb)) {
  1753. skb->data = skb_head;
  1754. skb->len = skb_len;
  1755. }
  1756. drop:
  1757. consume_skb(skb);
  1758. return 0;
  1759. }
  1760. static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
  1761. struct packet_type *pt, struct net_device *orig_dev)
  1762. {
  1763. struct sock *sk;
  1764. struct packet_sock *po;
  1765. struct sockaddr_ll *sll;
  1766. union tpacket_uhdr h;
  1767. u8 *skb_head = skb->data;
  1768. int skb_len = skb->len;
  1769. unsigned int snaplen, res;
  1770. unsigned long status = TP_STATUS_USER;
  1771. unsigned short macoff, netoff, hdrlen;
  1772. struct sk_buff *copy_skb = NULL;
  1773. struct timespec ts;
  1774. __u32 ts_status;
  1775. /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1776. * We may add members to them up to the current aligned size without forcing
  1777. * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
  1778. */
  1779. BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
  1780. BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
  1781. if (skb->pkt_type == PACKET_LOOPBACK)
  1782. goto drop;
  1783. sk = pt->af_packet_priv;
  1784. po = pkt_sk(sk);
  1785. if (!net_eq(dev_net(dev), sock_net(sk)))
  1786. goto drop;
  1787. if (dev->header_ops) {
  1788. if (sk->sk_type != SOCK_DGRAM)
  1789. skb_push(skb, skb->data - skb_mac_header(skb));
  1790. else if (skb->pkt_type == PACKET_OUTGOING) {
  1791. /* Special case: outgoing packets have ll header at head */
  1792. skb_pull(skb, skb_network_offset(skb));
  1793. }
  1794. }
  1795. snaplen = skb->len;
  1796. res = run_filter(skb, sk, snaplen);
  1797. if (!res)
  1798. goto drop_n_restore;
  1799. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1800. status |= TP_STATUS_CSUMNOTREADY;
  1801. else if (skb->pkt_type != PACKET_OUTGOING &&
  1802. (skb->ip_summed == CHECKSUM_COMPLETE ||
  1803. skb_csum_unnecessary(skb)))
  1804. status |= TP_STATUS_CSUM_VALID;
  1805. if (snaplen > res)
  1806. snaplen = res;
  1807. if (sk->sk_type == SOCK_DGRAM) {
  1808. macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
  1809. po->tp_reserve;
  1810. } else {
  1811. unsigned int maclen = skb_network_offset(skb);
  1812. netoff = TPACKET_ALIGN(po->tp_hdrlen +
  1813. (maclen < 16 ? 16 : maclen)) +
  1814. po->tp_reserve;
  1815. macoff = netoff - maclen;
  1816. }
  1817. if (po->tp_version <= TPACKET_V2) {
  1818. if (macoff + snaplen > po->rx_ring.frame_size) {
  1819. if (po->copy_thresh &&
  1820. atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
  1821. if (skb_shared(skb)) {
  1822. copy_skb = skb_clone(skb, GFP_ATOMIC);
  1823. } else {
  1824. copy_skb = skb_get(skb);
  1825. skb_head = skb->data;
  1826. }
  1827. if (copy_skb)
  1828. skb_set_owner_r(copy_skb, sk);
  1829. }
  1830. snaplen = po->rx_ring.frame_size - macoff;
  1831. if ((int)snaplen < 0)
  1832. snaplen = 0;
  1833. }
  1834. } else if (unlikely(macoff + snaplen >
  1835. GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
  1836. u32 nval;
  1837. nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
  1838. pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
  1839. snaplen, nval, macoff);
  1840. snaplen = nval;
  1841. if (unlikely((int)snaplen < 0)) {
  1842. snaplen = 0;
  1843. macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
  1844. }
  1845. }
  1846. spin_lock(&sk->sk_receive_queue.lock);
  1847. h.raw = packet_current_rx_frame(po, skb,
  1848. TP_STATUS_KERNEL, (macoff+snaplen));
  1849. if (!h.raw)
  1850. goto ring_is_full;
  1851. if (po->tp_version <= TPACKET_V2) {
  1852. packet_increment_rx_head(po, &po->rx_ring);
  1853. /*
  1854. * LOSING will be reported till you read the stats,
  1855. * because it's COR - Clear On Read.
1856. * Anyway, this is done for V1/V2 only, as V3 doesn't need it
  1857. * at packet level.
  1858. */
  1859. if (po->stats.stats1.tp_drops)
  1860. status |= TP_STATUS_LOSING;
  1861. }
  1862. po->stats.stats1.tp_packets++;
  1863. if (copy_skb) {
  1864. status |= TP_STATUS_COPY;
  1865. __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
  1866. }
  1867. spin_unlock(&sk->sk_receive_queue.lock);
  1868. skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
  1869. if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
  1870. getnstimeofday(&ts);
  1871. status |= ts_status;
  1872. switch (po->tp_version) {
  1873. case TPACKET_V1:
  1874. h.h1->tp_len = skb->len;
  1875. h.h1->tp_snaplen = snaplen;
  1876. h.h1->tp_mac = macoff;
  1877. h.h1->tp_net = netoff;
  1878. h.h1->tp_sec = ts.tv_sec;
  1879. h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
  1880. hdrlen = sizeof(*h.h1);
  1881. break;
  1882. case TPACKET_V2:
  1883. h.h2->tp_len = skb->len;
  1884. h.h2->tp_snaplen = snaplen;
  1885. h.h2->tp_mac = macoff;
  1886. h.h2->tp_net = netoff;
  1887. h.h2->tp_sec = ts.tv_sec;
  1888. h.h2->tp_nsec = ts.tv_nsec;
  1889. if (skb_vlan_tag_present(skb)) {
  1890. h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
  1891. h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
  1892. status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
  1893. } else {
  1894. h.h2->tp_vlan_tci = 0;
  1895. h.h2->tp_vlan_tpid = 0;
  1896. }
  1897. memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
  1898. hdrlen = sizeof(*h.h2);
  1899. break;
  1900. case TPACKET_V3:
1901. /* tp_nxt_offset and vlan are already populated above,
1902. * so DON'T clear those fields here.
  1903. */
  1904. h.h3->tp_status |= status;
  1905. h.h3->tp_len = skb->len;
  1906. h.h3->tp_snaplen = snaplen;
  1907. h.h3->tp_mac = macoff;
  1908. h.h3->tp_net = netoff;
  1909. h.h3->tp_sec = ts.tv_sec;
  1910. h.h3->tp_nsec = ts.tv_nsec;
  1911. memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
  1912. hdrlen = sizeof(*h.h3);
  1913. break;
  1914. default:
  1915. BUG();
  1916. }
  1917. sll = h.raw + TPACKET_ALIGN(hdrlen);
  1918. sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
  1919. sll->sll_family = AF_PACKET;
  1920. sll->sll_hatype = dev->type;
  1921. sll->sll_protocol = skb->protocol;
  1922. sll->sll_pkttype = skb->pkt_type;
  1923. if (unlikely(po->origdev))
  1924. sll->sll_ifindex = orig_dev->ifindex;
  1925. else
  1926. sll->sll_ifindex = dev->ifindex;
  1927. smp_mb();
  1928. #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
  1929. if (po->tp_version <= TPACKET_V2) {
  1930. u8 *start, *end;
  1931. end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
  1932. macoff + snaplen);
  1933. for (start = h.raw; start < end; start += PAGE_SIZE)
  1934. flush_dcache_page(pgv_to_page(start));
  1935. }
  1936. smp_wmb();
  1937. #endif
  1938. if (po->tp_version <= TPACKET_V2) {
  1939. __packet_set_status(po, h.raw, status);
  1940. sk->sk_data_ready(sk);
  1941. } else {
  1942. prb_clear_blk_fill_status(&po->rx_ring);
  1943. }
  1944. drop_n_restore:
  1945. if (skb_head != skb->data && skb_shared(skb)) {
  1946. skb->data = skb_head;
  1947. skb->len = skb_len;
  1948. }
  1949. drop:
  1950. kfree_skb(skb);
  1951. return 0;
  1952. ring_is_full:
  1953. po->stats.stats1.tp_drops++;
  1954. spin_unlock(&sk->sk_receive_queue.lock);
  1955. sk->sk_data_ready(sk);
  1956. kfree_skb(copy_skb);
  1957. goto drop_n_restore;
  1958. }
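/*
 * tpacket_rcv() fills frames that userspace mapped with PACKET_RX_RING. A
 * minimal TPACKET_V2 consumer sketch (the ring geometry is illustrative;
 * tp_frame_nr must equal tp_block_size / tp_frame_size * tp_block_nr, and
 * error handling is omitted):
 */
#if 0
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void rx_ring_example(int fd)
{
	int ver = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 64,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 128,	/* block_size / frame_size * block_nr */
	};
	char *ring;
	unsigned int i = 0;	/* ring head kept by userspace */
	struct tpacket2_hdr *hdr;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* poll() until readable, then consume the frame at the head */
	hdr = (struct tpacket2_hdr *)(ring + i * req.tp_frame_size);
	if (hdr->tp_status & TP_STATUS_USER) {
		/* packet data starts at (char *)hdr + hdr->tp_mac */
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand it back */
	}
}
#endif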
  1959. static void tpacket_destruct_skb(struct sk_buff *skb)
  1960. {
  1961. struct packet_sock *po = pkt_sk(skb->sk);
  1962. if (likely(po->tx_ring.pg_vec)) {
  1963. void *ph;
  1964. __u32 ts;
  1965. ph = skb_shinfo(skb)->destructor_arg;
  1966. packet_dec_pending(&po->tx_ring);
  1967. ts = __packet_set_timestamp(po, ph, skb);
  1968. __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
  1969. }
  1970. sock_wfree(skb);
  1971. }
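/*
 * Note that the timestamp-status bits returned by __packet_set_timestamp()
 * are ORed into the frame status together with TP_STATUS_AVAILABLE, so one
 * store tells userspace both that the frame is free again and which kind of
 * transmit timestamp, if any, was recorded.
 */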
  1972. static bool ll_header_truncated(const struct net_device *dev, int len)
  1973. {
  1974. /* net device doesn't like empty head */
  1975. if (unlikely(len < dev->hard_header_len)) {
  1976. net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
  1977. current->comm, len, dev->hard_header_len);
  1978. return true;
  1979. }
  1980. return false;
  1981. }
  1982. static void tpacket_set_protocol(const struct net_device *dev,
  1983. struct sk_buff *skb)
  1984. {
  1985. if (dev->type == ARPHRD_ETHER) {
  1986. skb_reset_mac_header(skb);
  1987. skb->protocol = eth_hdr(skb)->h_proto;
  1988. }
  1989. }
  1990. static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
  1991. void *frame, struct net_device *dev, int size_max,
  1992. __be16 proto, unsigned char *addr, int hlen)
  1993. {
  1994. union tpacket_uhdr ph;
  1995. int to_write, offset, len, tp_len, nr_frags, len_max;
  1996. struct socket *sock = po->sk.sk_socket;
  1997. struct page *page;
  1998. void *data;
  1999. int err;
  2000. ph.raw = frame;
  2001. skb->protocol = proto;
  2002. skb->dev = dev;
  2003. skb->priority = po->sk.sk_priority;
  2004. skb->mark = po->sk.sk_mark;
  2005. sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
  2006. skb_shinfo(skb)->destructor_arg = ph.raw;
  2007. switch (po->tp_version) {
  2008. case TPACKET_V2:
  2009. tp_len = ph.h2->tp_len;
  2010. break;
  2011. default:
  2012. tp_len = ph.h1->tp_len;
  2013. break;
  2014. }
  2015. if (unlikely(tp_len > size_max)) {
  2016. pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
  2017. return -EMSGSIZE;
  2018. }
  2019. skb_reserve(skb, hlen);
  2020. skb_reset_network_header(skb);
  2021. if (unlikely(po->tp_tx_has_off)) {
  2022. int off_min, off_max, off;
  2023. off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
  2024. off_max = po->tx_ring.frame_size - tp_len;
  2025. if (sock->type == SOCK_DGRAM) {
  2026. switch (po->tp_version) {
  2027. case TPACKET_V2:
  2028. off = ph.h2->tp_net;
  2029. break;
  2030. default:
  2031. off = ph.h1->tp_net;
  2032. break;
  2033. }
  2034. } else {
  2035. switch (po->tp_version) {
  2036. case TPACKET_V2:
  2037. off = ph.h2->tp_mac;
  2038. break;
  2039. default:
  2040. off = ph.h1->tp_mac;
  2041. break;
  2042. }
  2043. }
  2044. if (unlikely((off < off_min) || (off_max < off)))
  2045. return -EINVAL;
  2046. data = ph.raw + off;
  2047. } else {
  2048. data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
  2049. }
  2050. to_write = tp_len;
  2051. if (sock->type == SOCK_DGRAM) {
  2052. err = dev_hard_header(skb, dev, ntohs(proto), addr,
  2053. NULL, tp_len);
  2054. if (unlikely(err < 0))
  2055. return -EINVAL;
  2056. } else if (dev->hard_header_len) {
  2057. if (ll_header_truncated(dev, tp_len))
  2058. return -EINVAL;
  2059. skb_push(skb, dev->hard_header_len);
  2060. err = skb_store_bits(skb, 0, data,
  2061. dev->hard_header_len);
  2062. if (unlikely(err))
  2063. return err;
  2064. if (!skb->protocol)
  2065. tpacket_set_protocol(dev, skb);
  2066. data += dev->hard_header_len;
  2067. to_write -= dev->hard_header_len;
  2068. }
  2069. offset = offset_in_page(data);
  2070. len_max = PAGE_SIZE - offset;
  2071. len = ((to_write > len_max) ? len_max : to_write);
  2072. skb->data_len = to_write;
  2073. skb->len += to_write;
  2074. skb->truesize += to_write;
  2075. atomic_add(to_write, &po->sk.sk_wmem_alloc);
  2076. while (likely(to_write)) {
  2077. nr_frags = skb_shinfo(skb)->nr_frags;
  2078. if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2079. pr_err("Packet exceeds the number of skb frags (%lu)\n",
  2080. MAX_SKB_FRAGS);
  2081. return -EFAULT;
  2082. }
  2083. page = pgv_to_page(data);
  2084. data += len;
  2085. flush_dcache_page(page);
  2086. get_page(page);
  2087. skb_fill_page_desc(skb, nr_frags, page, offset, len);
  2088. to_write -= len;
  2089. offset = 0;
  2090. len_max = PAGE_SIZE;
  2091. len = ((to_write > len_max) ? len_max : to_write);
  2092. }
  2093. skb_probe_transport_header(skb, 0);
  2094. return tp_len;
  2095. }
  2096. static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
  2097. {
  2098. struct sk_buff *skb;
  2099. struct net_device *dev;
  2100. __be16 proto;
  2101. int err, reserve = 0;
  2102. void *ph;
  2103. DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
  2104. bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
  2105. int tp_len, size_max;
  2106. unsigned char *addr;
  2107. int len_sum = 0;
  2108. int status = TP_STATUS_AVAILABLE;
  2109. int hlen, tlen;
  2110. mutex_lock(&po->pg_vec_lock);
  2111. if (likely(saddr == NULL)) {
  2112. dev = packet_cached_dev_get(po);
  2113. proto = po->num;
  2114. addr = NULL;
  2115. } else {
  2116. err = -EINVAL;
  2117. if (msg->msg_namelen < sizeof(struct sockaddr_ll))
  2118. goto out;
  2119. if (msg->msg_namelen < (saddr->sll_halen
  2120. + offsetof(struct sockaddr_ll,
  2121. sll_addr)))
  2122. goto out;
  2123. proto = saddr->sll_protocol;
  2124. addr = saddr->sll_addr;
  2125. dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
  2126. }
  2127. err = -ENXIO;
  2128. if (unlikely(dev == NULL))
  2129. goto out;
  2130. err = -ENETDOWN;
  2131. if (unlikely(!(dev->flags & IFF_UP)))
  2132. goto out_put;
  2133. if (po->sk.sk_socket->type == SOCK_RAW)
  2134. reserve = dev->hard_header_len;
  2135. size_max = po->tx_ring.frame_size
  2136. - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
  2137. if (size_max > dev->mtu + reserve + VLAN_HLEN)
  2138. size_max = dev->mtu + reserve + VLAN_HLEN;
  2139. do {
  2140. ph = packet_current_frame(po, &po->tx_ring,
  2141. TP_STATUS_SEND_REQUEST);
  2142. if (unlikely(ph == NULL)) {
  2143. if (need_wait && need_resched())
  2144. schedule();
  2145. continue;
  2146. }
  2147. status = TP_STATUS_SEND_REQUEST;
  2148. hlen = LL_RESERVED_SPACE(dev);
  2149. tlen = dev->needed_tailroom;
  2150. skb = sock_alloc_send_skb(&po->sk,
  2151. hlen + tlen + sizeof(struct sockaddr_ll),
  2152. !need_wait, &err);
  2153. if (unlikely(skb == NULL)) {
  2154. /* we assume the socket was initially writeable ... */
  2155. if (likely(len_sum > 0))
  2156. err = len_sum;
  2157. goto out_status;
  2158. }
  2159. tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
  2160. addr, hlen);
  2161. if (likely(tp_len >= 0) &&
  2162. tp_len > dev->mtu + reserve &&
  2163. !packet_extra_vlan_len_allowed(dev, skb))
  2164. tp_len = -EMSGSIZE;
  2165. if (unlikely(tp_len < 0)) {
  2166. if (po->tp_loss) {
  2167. __packet_set_status(po, ph,
  2168. TP_STATUS_AVAILABLE);
  2169. packet_increment_head(&po->tx_ring);
  2170. kfree_skb(skb);
  2171. continue;
  2172. } else {
  2173. status = TP_STATUS_WRONG_FORMAT;
  2174. err = tp_len;
  2175. goto out_status;
  2176. }
  2177. }
  2178. packet_pick_tx_queue(dev, skb);
  2179. skb->destructor = tpacket_destruct_skb;
  2180. __packet_set_status(po, ph, TP_STATUS_SENDING);
  2181. packet_inc_pending(&po->tx_ring);
  2182. status = TP_STATUS_SEND_REQUEST;
  2183. err = po->xmit(skb);
  2184. if (unlikely(err > 0)) {
  2185. err = net_xmit_errno(err);
  2186. if (err && __packet_get_status(po, ph) ==
  2187. TP_STATUS_AVAILABLE) {
  2188. /* skb was destructed already */
  2189. skb = NULL;
  2190. goto out_status;
  2191. }
  2192. /*
  2193. * skb was dropped but not destructed yet;
  2194. * let's treat it like congestion or err < 0
  2195. */
  2196. err = 0;
  2197. }
  2198. packet_increment_head(&po->tx_ring);
  2199. len_sum += tp_len;
  2200. } while (likely((ph != NULL) ||
  2201. /* Note: packet_read_pending() might be slow if we have
2202. * to call it, as it's a per-cpu variable, but in the fast path
  2203. * we already short-circuit the loop with the first
  2204. * condition, and luckily don't have to go that path
  2205. * anyway.
  2206. */
  2207. (need_wait && packet_read_pending(&po->tx_ring))));
  2208. err = len_sum;
  2209. goto out_put;
  2210. out_status:
  2211. __packet_set_status(po, ph, status);
  2212. kfree_skb(skb);
  2213. out_put:
  2214. dev_put(dev);
  2215. out:
  2216. mutex_unlock(&po->pg_vec_lock);
  2217. return err;
  2218. }
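/*
 * The transmit side mirrors this from userspace: after setting
 * PACKET_VERSION and PACKET_TX_RING and mmapping, as on the rx side, write
 * the payload after the frame header, mark the frame SEND_REQUEST, and kick
 * the kernel with a zero-length send(). A minimal TPACKET_V2 sketch (the
 * payload offset matches the !tp_tx_has_off case in tpacket_fill_skb();
 * tx_ring/frame_size/i/pkt/pkt_len are illustrative parameters):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void tx_ring_example(int fd, char *tx_ring, unsigned int frame_size,
			    unsigned int i, const void *pkt,
			    unsigned int pkt_len)
{
	struct tpacket2_hdr *hdr =
		(struct tpacket2_hdr *)(tx_ring + i * frame_size);
	unsigned char *data = (unsigned char *)hdr + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

	memcpy(data, pkt, pkt_len);
	hdr->tp_len = pkt_len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	send(fd, NULL, 0, 0);	/* drains every SEND_REQUEST frame */
}
#endif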
  2219. static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
  2220. size_t reserve, size_t len,
  2221. size_t linear, int noblock,
  2222. int *err)
  2223. {
  2224. struct sk_buff *skb;
  2225. /* Under a page? Don't bother with paged skb. */
  2226. if (prepad + len < PAGE_SIZE || !linear)
  2227. linear = len;
  2228. skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
  2229. err, 0);
  2230. if (!skb)
  2231. return NULL;
  2232. skb_reserve(skb, reserve);
  2233. skb_put(skb, linear);
  2234. skb->data_len = len - linear;
  2235. skb->len += len - linear;
  2236. return skb;
  2237. }
  2238. static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
  2239. {
  2240. struct sock *sk = sock->sk;
  2241. DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
  2242. struct sk_buff *skb;
  2243. struct net_device *dev;
  2244. __be16 proto;
  2245. unsigned char *addr;
  2246. int err, reserve = 0;
  2247. struct sockcm_cookie sockc;
  2248. struct virtio_net_hdr vnet_hdr = { 0 };
  2249. int offset = 0;
  2250. int vnet_hdr_len;
  2251. struct packet_sock *po = pkt_sk(sk);
  2252. unsigned short gso_type = 0;
  2253. int hlen, tlen;
  2254. int extra_len = 0;
  2255. ssize_t n;
  2256. /*
  2257. * Get and verify the address.
  2258. */
  2259. if (likely(saddr == NULL)) {
  2260. dev = packet_cached_dev_get(po);
  2261. proto = po->num;
  2262. addr = NULL;
  2263. } else {
  2264. err = -EINVAL;
  2265. if (msg->msg_namelen < sizeof(struct sockaddr_ll))
  2266. goto out;
  2267. if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
  2268. goto out;
  2269. proto = saddr->sll_protocol;
  2270. addr = saddr->sll_addr;
  2271. dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
  2272. }
  2273. err = -ENXIO;
  2274. if (unlikely(dev == NULL))
  2275. goto out_unlock;
  2276. err = -ENETDOWN;
  2277. if (unlikely(!(dev->flags & IFF_UP)))
  2278. goto out_unlock;
  2279. sockc.mark = sk->sk_mark;
  2280. if (msg->msg_controllen) {
  2281. err = sock_cmsg_send(sk, msg, &sockc);
  2282. if (unlikely(err))
  2283. goto out_unlock;
  2284. }
  2285. if (sock->type == SOCK_RAW)
  2286. reserve = dev->hard_header_len;
  2287. if (po->has_vnet_hdr) {
  2288. vnet_hdr_len = sizeof(vnet_hdr);
  2289. err = -EINVAL;
  2290. if (len < vnet_hdr_len)
  2291. goto out_unlock;
  2292. len -= vnet_hdr_len;
  2293. err = -EFAULT;
  2294. n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
  2295. if (n != vnet_hdr_len)
  2296. goto out_unlock;
  2297. if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
  2298. (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
  2299. __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
  2300. __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
  2301. vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
  2302. __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
  2303. __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
  2304. err = -EINVAL;
  2305. if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
  2306. goto out_unlock;
  2307. if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
  2308. switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
  2309. case VIRTIO_NET_HDR_GSO_TCPV4:
  2310. gso_type = SKB_GSO_TCPV4;
  2311. break;
  2312. case VIRTIO_NET_HDR_GSO_TCPV6:
  2313. gso_type = SKB_GSO_TCPV6;
  2314. break;
  2315. case VIRTIO_NET_HDR_GSO_UDP:
  2316. gso_type = SKB_GSO_UDP;
  2317. break;
  2318. default:
  2319. goto out_unlock;
  2320. }
  2321. if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
  2322. gso_type |= SKB_GSO_TCP_ECN;
  2323. if (vnet_hdr.gso_size == 0)
  2324. goto out_unlock;
  2325. }
  2326. }
  2327. if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
  2328. if (!netif_supports_nofcs(dev)) {
  2329. err = -EPROTONOSUPPORT;
  2330. goto out_unlock;
  2331. }
  2332. extra_len = 4; /* We're doing our own CRC */
  2333. }
  2334. err = -EMSGSIZE;
  2335. if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
  2336. goto out_unlock;
  2337. err = -ENOBUFS;
  2338. hlen = LL_RESERVED_SPACE(dev);
  2339. tlen = dev->needed_tailroom;
  2340. skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
  2341. __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
  2342. msg->msg_flags & MSG_DONTWAIT, &err);
  2343. if (skb == NULL)
  2344. goto out_unlock;
  2345. skb_set_network_header(skb, reserve);
  2346. err = -EINVAL;
  2347. if (sock->type == SOCK_DGRAM) {
  2348. offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
  2349. if (unlikely(offset < 0))
  2350. goto out_free;
  2351. } else {
  2352. if (ll_header_truncated(dev, len))
  2353. goto out_free;
  2354. }
  2355. /* Returns -EFAULT on error */
  2356. err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
  2357. if (err)
  2358. goto out_free;
  2359. sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
  2360. if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
  2361. !packet_extra_vlan_len_allowed(dev, skb)) {
  2362. err = -EMSGSIZE;
  2363. goto out_free;
  2364. }
  2365. skb->protocol = proto;
  2366. skb->dev = dev;
  2367. skb->priority = sk->sk_priority;
  2368. skb->mark = sockc.mark;
  2369. packet_pick_tx_queue(dev, skb);
  2370. if (po->has_vnet_hdr) {
  2371. if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
  2372. u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
  2373. u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
  2374. if (!skb_partial_csum_set(skb, s, o)) {
  2375. err = -EINVAL;
  2376. goto out_free;
  2377. }
  2378. }
  2379. skb_shinfo(skb)->gso_size =
  2380. __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
  2381. skb_shinfo(skb)->gso_type = gso_type;
  2382. /* Header must be checked, and gso_segs computed. */
  2383. skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
  2384. skb_shinfo(skb)->gso_segs = 0;
  2385. len += vnet_hdr_len;
  2386. }
  2387. skb_probe_transport_header(skb, reserve);
  2388. if (unlikely(extra_len == 4))
  2389. skb->no_fcs = 1;
  2390. err = po->xmit(skb);
  2391. if (err > 0 && (err = net_xmit_errno(err)) != 0)
  2392. goto out_unlock;
  2393. dev_put(dev);
  2394. return len;
  2395. out_free:
  2396. kfree_skb(skb);
  2397. out_unlock:
  2398. if (dev)
  2399. dev_put(dev);
  2400. out:
  2401. return err;
  2402. }
  2403. static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
  2404. {
  2405. struct sock *sk = sock->sk;
  2406. struct packet_sock *po = pkt_sk(sk);
  2407. if (po->tx_ring.pg_vec)
  2408. return tpacket_snd(po, msg);
  2409. else
  2410. return packet_snd(sock, msg, len);
  2411. }
  2412. /*
  2413. * Close a PACKET socket. This is fairly simple. We immediately go
  2414. * to 'closed' state and remove our protocol entry in the device list.
  2415. */
  2416. static int packet_release(struct socket *sock)
  2417. {
  2418. struct sock *sk = sock->sk;
  2419. struct packet_sock *po;
  2420. struct net *net;
  2421. union tpacket_req_u req_u;
  2422. if (!sk)
  2423. return 0;
  2424. net = sock_net(sk);
  2425. po = pkt_sk(sk);
  2426. mutex_lock(&net->packet.sklist_lock);
  2427. sk_del_node_init_rcu(sk);
  2428. mutex_unlock(&net->packet.sklist_lock);
  2429. preempt_disable();
  2430. sock_prot_inuse_add(net, sk->sk_prot, -1);
  2431. preempt_enable();
  2432. spin_lock(&po->bind_lock);
  2433. unregister_prot_hook(sk, false);
  2434. packet_cached_dev_reset(po);
  2435. if (po->prot_hook.dev) {
  2436. dev_put(po->prot_hook.dev);
  2437. po->prot_hook.dev = NULL;
  2438. }
  2439. spin_unlock(&po->bind_lock);
  2440. packet_flush_mclist(sk);
  2441. if (po->rx_ring.pg_vec) {
  2442. memset(&req_u, 0, sizeof(req_u));
  2443. packet_set_ring(sk, &req_u, 1, 0);
  2444. }
  2445. if (po->tx_ring.pg_vec) {
  2446. memset(&req_u, 0, sizeof(req_u));
  2447. packet_set_ring(sk, &req_u, 1, 1);
  2448. }
  2449. fanout_release(sk);
  2450. synchronize_net();
  2451. /*
  2452. * Now the socket is dead. No more input will appear.
  2453. */
  2454. sock_orphan(sk);
  2455. sock->sk = NULL;
  2456. /* Purge queues */
  2457. skb_queue_purge(&sk->sk_receive_queue);
  2458. packet_free_pending(po);
  2459. sk_refcnt_debug_release(sk);
  2460. sock_put(sk);
  2461. return 0;
  2462. }
  2463. /*
  2464. * Attach a packet hook.
  2465. */
  2466. static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
  2467. __be16 proto)
  2468. {
  2469. struct packet_sock *po = pkt_sk(sk);
  2470. struct net_device *dev_curr;
  2471. __be16 proto_curr;
  2472. bool need_rehook;
  2473. struct net_device *dev = NULL;
  2474. int ret = 0;
  2475. bool unlisted = false;
  2476. if (po->fanout)
  2477. return -EINVAL;
  2478. lock_sock(sk);
  2479. spin_lock(&po->bind_lock);
  2480. rcu_read_lock();
  2481. if (name) {
  2482. dev = dev_get_by_name_rcu(sock_net(sk), name);
  2483. if (!dev) {
  2484. ret = -ENODEV;
  2485. goto out_unlock;
  2486. }
  2487. } else if (ifindex) {
  2488. dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
  2489. if (!dev) {
  2490. ret = -ENODEV;
  2491. goto out_unlock;
  2492. }
  2493. }
  2494. if (dev)
  2495. dev_hold(dev);
  2496. proto_curr = po->prot_hook.type;
  2497. dev_curr = po->prot_hook.dev;
  2498. need_rehook = proto_curr != proto || dev_curr != dev;
  2499. if (need_rehook) {
  2500. if (po->running) {
  2501. rcu_read_unlock();
  2502. __unregister_prot_hook(sk, true);
  2503. rcu_read_lock();
  2504. dev_curr = po->prot_hook.dev;
  2505. if (dev)
  2506. unlisted = !dev_get_by_index_rcu(sock_net(sk),
  2507. dev->ifindex);
  2508. }
  2509. po->num = proto;
  2510. po->prot_hook.type = proto;
  2511. if (unlikely(unlisted)) {
  2512. dev_put(dev);
  2513. po->prot_hook.dev = NULL;
  2514. po->ifindex = -1;
  2515. packet_cached_dev_reset(po);
  2516. } else {
  2517. po->prot_hook.dev = dev;
  2518. po->ifindex = dev ? dev->ifindex : 0;
  2519. packet_cached_dev_assign(po, dev);
  2520. }
  2521. }
  2522. if (dev_curr)
  2523. dev_put(dev_curr);
  2524. if (proto == 0 || !need_rehook)
  2525. goto out_unlock;
  2526. if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
  2527. register_prot_hook(sk);
  2528. } else {
  2529. sk->sk_err = ENETDOWN;
  2530. if (!sock_flag(sk, SOCK_DEAD))
  2531. sk->sk_error_report(sk);
  2532. }
  2533. out_unlock:
  2534. rcu_read_unlock();
  2535. spin_unlock(&po->bind_lock);
  2536. release_sock(sk);
  2537. return ret;
  2538. }
  2539. /*
  2540. * Bind a packet socket to a device
  2541. */
  2542. static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
  2543. int addr_len)
  2544. {
  2545. struct sock *sk = sock->sk;
  2546. char name[15];
  2547. /*
  2548. * Check legality
  2549. */
  2550. if (addr_len != sizeof(struct sockaddr))
  2551. return -EINVAL;
  2552. strlcpy(name, uaddr->sa_data, sizeof(name));
  2553. return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
  2554. }
  2555. static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
  2556. {
  2557. struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
  2558. struct sock *sk = sock->sk;
  2559. /*
  2560. * Check legality
  2561. */
  2562. if (addr_len < sizeof(struct sockaddr_ll))
  2563. return -EINVAL;
  2564. if (sll->sll_family != AF_PACKET)
  2565. return -EINVAL;
  2566. return packet_do_bind(sk, NULL, sll->sll_ifindex,
  2567. sll->sll_protocol ? : pkt_sk(sk)->num);
  2568. }
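/*
 * From userspace, only sll_protocol and sll_ifindex matter when binding. A
 * minimal sketch binding an AF_PACKET socket to one interface ("eth0" is
 * illustrative):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static void bind_example(int fd)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex  = if_nametoindex("eth0");

	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif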
  2569. static struct proto packet_proto = {
  2570. .name = "PACKET",
  2571. .owner = THIS_MODULE,
  2572. .obj_size = sizeof(struct packet_sock),
  2573. };
  2574. /*
  2575. * Create a packet of type SOCK_PACKET.
  2576. */
  2577. static int packet_create(struct net *net, struct socket *sock, int protocol,
  2578. int kern)
  2579. {
  2580. struct sock *sk;
  2581. struct packet_sock *po;
  2582. __be16 proto = (__force __be16)protocol; /* weird, but documented */
  2583. int err;
  2584. if (!ns_capable(net->user_ns, CAP_NET_RAW))
  2585. return -EPERM;
  2586. if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
  2587. sock->type != SOCK_PACKET)
  2588. return -ESOCKTNOSUPPORT;
  2589. sock->state = SS_UNCONNECTED;
  2590. err = -ENOBUFS;
  2591. sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
  2592. if (sk == NULL)
  2593. goto out;
  2594. sock->ops = &packet_ops;
  2595. if (sock->type == SOCK_PACKET)
  2596. sock->ops = &packet_ops_spkt;
  2597. sock_init_data(sock, sk);
  2598. po = pkt_sk(sk);
  2599. sk->sk_family = PF_PACKET;
  2600. po->num = proto;
  2601. po->xmit = dev_queue_xmit;
  2602. err = packet_alloc_pending(po);
  2603. if (err)
  2604. goto out2;
  2605. packet_cached_dev_reset(po);
  2606. sk->sk_destruct = packet_sock_destruct;
  2607. sk_refcnt_debug_inc(sk);
  2608. /*
  2609. * Attach a protocol block
  2610. */
  2611. spin_lock_init(&po->bind_lock);
  2612. mutex_init(&po->pg_vec_lock);
  2613. po->rollover = NULL;
  2614. po->prot_hook.func = packet_rcv;
  2615. if (sock->type == SOCK_PACKET)
  2616. po->prot_hook.func = packet_rcv_spkt;
  2617. po->prot_hook.af_packet_priv = sk;
  2618. if (proto) {
  2619. po->prot_hook.type = proto;
  2620. register_prot_hook(sk);
  2621. }
  2622. mutex_lock(&net->packet.sklist_lock);
  2623. sk_add_node_rcu(sk, &net->packet.sklist);
  2624. mutex_unlock(&net->packet.sklist_lock);
  2625. preempt_disable();
  2626. sock_prot_inuse_add(net, &packet_proto, 1);
  2627. preempt_enable();
  2628. return 0;
  2629. out2:
  2630. sk_free(sk);
  2631. out:
  2632. return err;
  2633. }
  2634. /*
  2635. * Pull a packet from our receive queue and hand it to the user.
  2636. * If necessary we block.
  2637. */
  2638. static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
  2639. int flags)
  2640. {
  2641. struct sock *sk = sock->sk;
  2642. struct sk_buff *skb;
  2643. int copied, err;
  2644. int vnet_hdr_len = 0;
  2645. unsigned int origlen = 0;
  2646. err = -EINVAL;
  2647. if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
  2648. goto out;
  2649. #if 0
  2650. /* What error should we return now? EUNATTACH? */
  2651. if (pkt_sk(sk)->ifindex < 0)
  2652. return -ENODEV;
  2653. #endif
  2654. if (flags & MSG_ERRQUEUE) {
  2655. err = sock_recv_errqueue(sk, msg, len,
  2656. SOL_PACKET, PACKET_TX_TIMESTAMP);
  2657. goto out;
  2658. }
  2659. /*
  2660. * Call the generic datagram receiver. This handles all sorts
  2661. * of horrible races and re-entrancy so we can forget about it
  2662. * in the protocol layers.
  2663. *
2664. * Now it will return ENETDOWN, if the device has just gone down,
  2665. * but then it will block.
  2666. */
  2667. skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
  2668. /*
  2669. * An error occurred so return it. Because skb_recv_datagram()
2670. * handles the blocking, we don't need to see or worry about
2671. * blocking retries.
  2672. */
  2673. if (skb == NULL)
  2674. goto out;
  2675. if (pkt_sk(sk)->pressure)
  2676. packet_rcv_has_room(pkt_sk(sk), NULL);
  2677. if (pkt_sk(sk)->has_vnet_hdr) {
  2678. struct virtio_net_hdr vnet_hdr = { 0 };
  2679. err = -EINVAL;
  2680. vnet_hdr_len = sizeof(vnet_hdr);
  2681. if (len < vnet_hdr_len)
  2682. goto out_free;
  2683. len -= vnet_hdr_len;
  2684. if (skb_is_gso(skb)) {
  2685. struct skb_shared_info *sinfo = skb_shinfo(skb);
  2686. /* This is a hint as to how much should be linear. */
  2687. vnet_hdr.hdr_len =
  2688. __cpu_to_virtio16(vio_le(), skb_headlen(skb));
  2689. vnet_hdr.gso_size =
  2690. __cpu_to_virtio16(vio_le(), sinfo->gso_size);
  2691. if (sinfo->gso_type & SKB_GSO_TCPV4)
  2692. vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
  2693. else if (sinfo->gso_type & SKB_GSO_TCPV6)
  2694. vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
  2695. else if (sinfo->gso_type & SKB_GSO_UDP)
  2696. vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
  2697. else if (sinfo->gso_type & SKB_GSO_FCOE)
  2698. goto out_free;
  2699. else
  2700. BUG();
  2701. if (sinfo->gso_type & SKB_GSO_TCP_ECN)
  2702. vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
  2703. } else
  2704. vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
  2705. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2706. vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
  2707. vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
  2708. skb_checksum_start_offset(skb));
  2709. vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
  2710. skb->csum_offset);
  2711. } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
  2712. vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
  2713. } /* else everything is zero */
  2714. err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
  2715. if (err < 0)
  2716. goto out_free;
  2717. }
2718. /* You lose any data beyond the buffer you gave. If this worries
2719. * a user program, it can ask the device for its MTU
  2720. * anyway.
  2721. */
  2722. copied = skb->len;
  2723. if (copied > len) {
  2724. copied = len;
  2725. msg->msg_flags |= MSG_TRUNC;
  2726. }
  2727. err = skb_copy_datagram_msg(skb, 0, msg, copied);
  2728. if (err)
  2729. goto out_free;
  2730. if (sock->type != SOCK_PACKET) {
  2731. struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
  2732. /* Original length was stored in sockaddr_ll fields */
  2733. origlen = PACKET_SKB_CB(skb)->sa.origlen;
  2734. sll->sll_family = AF_PACKET;
  2735. sll->sll_protocol = skb->protocol;
  2736. }
  2737. sock_recv_ts_and_drops(msg, sk, skb);
  2738. if (msg->msg_name) {
  2739. /* If the address length field is there to be filled
  2740. * in, we fill it in now.
  2741. */
  2742. if (sock->type == SOCK_PACKET) {
  2743. __sockaddr_check_size(sizeof(struct sockaddr_pkt));
  2744. msg->msg_namelen = sizeof(struct sockaddr_pkt);
  2745. } else {
  2746. struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
  2747. msg->msg_namelen = sll->sll_halen +
  2748. offsetof(struct sockaddr_ll, sll_addr);
  2749. }
  2750. memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
  2751. msg->msg_namelen);
  2752. }
  2753. if (pkt_sk(sk)->auxdata) {
  2754. struct tpacket_auxdata aux;
  2755. aux.tp_status = TP_STATUS_USER;
  2756. if (skb->ip_summed == CHECKSUM_PARTIAL)
  2757. aux.tp_status |= TP_STATUS_CSUMNOTREADY;
  2758. else if (skb->pkt_type != PACKET_OUTGOING &&
  2759. (skb->ip_summed == CHECKSUM_COMPLETE ||
  2760. skb_csum_unnecessary(skb)))
  2761. aux.tp_status |= TP_STATUS_CSUM_VALID;
  2762. aux.tp_len = origlen;
  2763. aux.tp_snaplen = skb->len;
  2764. aux.tp_mac = 0;
  2765. aux.tp_net = skb_network_offset(skb);
  2766. if (skb_vlan_tag_present(skb)) {
  2767. aux.tp_vlan_tci = skb_vlan_tag_get(skb);
  2768. aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
  2769. aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
  2770. } else {
  2771. aux.tp_vlan_tci = 0;
  2772. aux.tp_vlan_tpid = 0;
  2773. }
  2774. put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
  2775. }
  2776. /*
  2777. * Free or return the buffer as appropriate. Again this
  2778. * hides all the races and re-entrancy issues from us.
  2779. */
  2780. err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
  2781. out_free:
  2782. skb_free_datagram(sk, skb);
  2783. out:
  2784. return err;
  2785. }
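
/* Illustrative userspace sketch (not part of this file): receiving one
 * frame through the path above, detecting MSG_TRUNC and walking the
 * PACKET_AUXDATA cmsg it emits. Assumes "fd" is a PF_PACKET socket with
 * PACKET_AUXDATA already enabled via setsockopt().
 *
 *	char buf[2048];
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
 *		;	// frame longer than buf; data beyond buf is lost
 *	struct cmsghdr *c;
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(c);
 *			// aux->tp_len holds the original wire length
 *		}
 */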

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
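
/* Illustrative userspace sketch: getsockname() on a bound PF_PACKET
 * socket returns the sockaddr_ll built above, including the device's
 * hardware type and MAC. "fd" is assumed to be already bound.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t len = sizeof(sll);
 *	if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
 *		;	// inspect sll.sll_ifindex, sll.sll_halen, sll.sll_addr
 */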

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist_delete(struct net_device *dev,
				     struct packet_mclist **mlp)
{
	struct packet_mclist *ml;

	while ((ml = *mlp) != NULL) {
		if (ml->ifindex == dev->ifindex) {
			packet_dev_mc(dev, ml, -1);
			*mlp = ml->next;
			kfree(ml);
		} else
			mlp = &ml->next;
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}
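
/* Illustrative userspace sketch: PACKET_ADD_MEMBERSHIP reaches this
 * function via packet_setsockopt() below. Putting an interface into
 * promiscuous mode, refcounted through the mclist, looks roughly like
 * this (the interface name is an assumption):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */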

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk),
							 ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			break;
		}
	}

	rtnl_unlock();
	return 0;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;

		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len)
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req_u.req, optval, len))
			return -EFAULT;
		return packet_set_ring(sk, &req_u, 0,
				       optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
		case TPACKET_V3:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	case PACKET_FANOUT_DATA:
	{
		if (!po->fanout)
			return -EINVAL;

		return fanout_set_data(po, optval, optlen);
	}
	case PACKET_TX_HAS_OFF:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_tx_has_off = !!val;
		return 0;
	}
	case PACKET_QDISC_BYPASS:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
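
/* Illustrative userspace sketch: the PACKET_FANOUT case above expects
 * the group id in the low 16 bits and the mode in the upper bits; the
 * group id 42 here is an arbitrary example value:
 *
 *	int val = (PACKET_FANOUT_HASH << 16) | 42;
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */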

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	union tpacket_stats_u st;
	struct tpacket_rollover_stats rstats;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memcpy(&st, &po->stats, sizeof(st));
		memset(&po->stats, 0, sizeof(po->stats));
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			st.stats3.tp_packets += st.stats3.tp_drops;
			data = &st.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st.stats1.tp_packets += st.stats1.tp_drops;
			data = &st.stats1;
		}

		break;
	case PACKET_AUXDATA:
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		val = po->tp_loss;
		break;
	case PACKET_TIMESTAMP:
		val = po->tp_tstamp;
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16) |
			((u32)po->fanout->flags << 24)) :
		       0);
		break;
	case PACKET_ROLLOVER_STATS:
		if (!po->rollover)
			return -EINVAL;
		rstats.tp_all = atomic_long_read(&po->rollover->num);
		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
		data = &rstats;
		lv = sizeof(rstats);
		break;
	case PACKET_TX_HAS_OFF:
		val = po->tp_tx_has_off;
		break;
	case PACKET_QDISC_BYPASS:
		val = packet_use_direct_xmit(po);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
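
/* Illustrative userspace sketch: PACKET_STATISTICS above copies the
 * counters and then zeroes them, so each call returns deltas since the
 * previous read. For a TPACKET_V1/V2 socket:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	// st.tp_packets already includes st.tp_drops, per the code above
 */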

static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}

static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
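
/* Illustrative userspace sketch: on packet sockets SIOCINQ reports the
 * length of the next queued frame (the skb_peek() above), not the
 * total bytes queued:
 *
 *	int next_len = 0;
 *	ioctl(fd, SIOCINQ, &next_len);
 */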

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		po->pressure = 0;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
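
/* Illustrative userspace sketch: with an rx ring mapped, poll() wakes
 * when the frame at the ring head has been handed to user space:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
 *	poll(&pfd, 1, -1);
 */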

/* Dirty? Well, I still have not found a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Alias added to minimize code churn */
	struct tpacket_req *req = &req_u->req;

	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
		WARN(1, "Tx-ring is not supported.\n");
		goto out;
	}

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		if (po->tp_version >= TPACKET_V3 &&
		    (int)(req->tp_block_size -
			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Transmit path is not supported. We checked
			 * it above but just being paranoid.
			 */
			if (!tx_ring)
				init_prb_bdqc(po, rb, pg_vec, req_u);
			break;
		default:
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (closing && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}
	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
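
/* Illustrative userspace sketch of a request that passes the sanity
 * checks above: tp_block_size page-aligned, tp_frame_size a multiple
 * of TPACKET_ALIGNMENT, and tp_frame_nr == frames_per_block *
 * tp_block_nr. Sizes are arbitrary example values assuming 4 KiB pages:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr = 64,
 *		.tp_frame_size = 2048,	// 2 frames per block
 *		.tp_frame_nr = 2 * 64,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */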

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
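
/* Illustrative userspace sketch: a single mmap() must cover the
 * combined size of all configured rings (rx first, then tx, matching
 * the loop above). With only an rx ring configured, using "req" from
 * the sketch after packet_set_ring():
 *
 *	size_t sz = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */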

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);