/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: flag indicating that this SA may be used for reception
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: flag indicating that this SA may be used for transmission
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is in use
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the End Station (ES) bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
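
/* SA/SC lifetime helpers: the _get() functions take an RCU-protected pointer
 * and return it with an elevated refcount (or NULL if the entry is missing,
 * inactive, or already being torn down); the matching _put() functions drop
 * the refcount and free the object via RCU once the last reference is gone.
 */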
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
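
/* Decide whether the SCI is carried explicitly in the SecTAG: either the user
 * requested it, or this SecY has more than one RX SC and neither the ES nor
 * the SCB shorthand encodings apply.
 */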
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;

	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
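
/* Reserve the next packet number for this TX SA. When the counter wraps to
 * zero the SA is deactivated and, if frames are protected, the SecY is marked
 * not operational; callers treat a returned PN of 0 as "no usable PN" and
 * drop the packet.
 */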
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}
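
/* Allocate the AEAD request, the GCM IV and the scatterlist array in a single
 * atomic allocation; the IV and scatterlist pointers are returned through
 * @iv and @sg and live in the same buffer as the request itself.
 */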
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
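
/* Transform an outgoing frame in place: pick the encoding SA, make room for
 * the SecTAG and ICV, fill in the SecTAG, and hand the frame to the AEAD
 * transform. Returns the protected skb, an ERR_PTR on failure, or
 * ERR_PTR(-EINPROGRESS) when the crypto completes asynchronously via
 * macsec_encrypt_done().
 */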
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
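
/* Post-decryption checks from IEEE 802.1AE-2006 figure 10-5: re-run the
 * replay window check under the SA lock, update the per-SC/per-SA counters
 * according to the validation mode, and advance next_pn for valid frames.
 * Returns false if the frame must be dropped.
 */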
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
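
/* Run the AEAD transform on a received frame: in confidentiality mode the
 * payload is decrypted and the headers authenticated, in integrity-only mode
 * the whole frame is authenticated against the trailing ICV. Returns the skb,
 * an ERR_PTR, or ERR_PTR(-EINPROGRESS) if completion happens asynchronously
 * in macsec_decrypt_done().
 */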
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
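
/* Deliver untagged frames to every SecY on the underlying device whose
 * validateFrames setting is not Strict, counting them as InPktsUntagged;
 * Strict SecYs only count the frame as InPktsNoTag.
 */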
static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

	rcu_read_unlock();
}
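
/* rx_handler attached to the underlying device: find the SecY and RX SC that
 * match the frame's SCI, validate the SecTAG, decrypt/verify on the matching
 * RX SA, and deliver the result to the MACsec netdevice. Frames without a
 * SecTAG fall through to the uncontrolled port via handle_not_macsec().
 */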
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
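
/* Allocate a GCM(AES) AEAD transform and program it with the SAK and the
 * configured ICV length; returns an ERR_PTR if allocation or configuration
 * fails.
 */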
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;
	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
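
/* Create a new RX SC for @sci and link it at the head of the SecY's RX SC
 * list; fails with -EEXIST if any SecY on the same underlying device already
 * has an SC with that SCI. Called with rtnl held.
 */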
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;
	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;
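
/* Generic netlink plumbing: resolve the MACsec netdevice and the TX SA,
 * RX SC or RX SA addressed by the request attributes. These helpers run
 * under rtnl and return ERR_PTR on lookup failure.
 */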
static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}
  1312. static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
  1313. [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
  1314. [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
  1315. [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
  1316. };
  1317. static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
  1318. [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
  1319. [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
  1320. };
  1321. static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
  1322. [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
  1323. [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
  1324. [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
  1325. [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
  1326. .len = MACSEC_KEYID_LEN, },
  1327. [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
  1328. .len = MACSEC_MAX_KEY_LEN, },
  1329. };
  1330. static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
  1331. {
  1332. if (!attrs[MACSEC_ATTR_SA_CONFIG])
  1333. return -EINVAL;
  1334. if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
  1335. attrs[MACSEC_ATTR_SA_CONFIG],
  1336. macsec_genl_sa_policy, NULL))
  1337. return -EINVAL;
  1338. return 0;
  1339. }
  1340. static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
  1341. {
  1342. if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
  1343. return -EINVAL;
  1344. if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
  1345. attrs[MACSEC_ATTR_RXSC_CONFIG],
  1346. macsec_genl_rxsc_policy, NULL))
  1347. return -EINVAL;
  1348. return 0;
  1349. }
  1350. static bool validate_add_rxsa(struct nlattr **attrs)
  1351. {
  1352. if (!attrs[MACSEC_SA_ATTR_AN] ||
  1353. !attrs[MACSEC_SA_ATTR_KEY] ||
  1354. !attrs[MACSEC_SA_ATTR_KEYID])
  1355. return false;
  1356. if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
  1357. return false;
  1358. if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
  1359. return false;
  1360. if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
  1361. if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
  1362. return false;
  1363. }
  1364. if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
  1365. return false;
  1366. return true;
  1367. }
  1368. static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
  1369. {
  1370. struct net_device *dev;
  1371. struct nlattr **attrs = info->attrs;
  1372. struct macsec_secy *secy;
  1373. struct macsec_rx_sc *rx_sc;
  1374. struct macsec_rx_sa *rx_sa;
  1375. unsigned char assoc_num;
  1376. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1377. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1378. int err;
  1379. if (!attrs[MACSEC_ATTR_IFINDEX])
  1380. return -EINVAL;
  1381. if (parse_sa_config(attrs, tb_sa))
  1382. return -EINVAL;
  1383. if (parse_rxsc_config(attrs, tb_rxsc))
  1384. return -EINVAL;
  1385. if (!validate_add_rxsa(tb_sa))
  1386. return -EINVAL;
  1387. rtnl_lock();
  1388. rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
  1389. if (IS_ERR(rx_sc)) {
  1390. rtnl_unlock();
  1391. return PTR_ERR(rx_sc);
  1392. }
  1393. assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
  1394. if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
  1395. pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
  1396. nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
  1397. rtnl_unlock();
  1398. return -EINVAL;
  1399. }
  1400. rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
  1401. if (rx_sa) {
  1402. rtnl_unlock();
  1403. return -EBUSY;
  1404. }
  1405. rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
  1406. if (!rx_sa) {
  1407. rtnl_unlock();
  1408. return -ENOMEM;
  1409. }
  1410. err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
  1411. secy->key_len, secy->icv_len);
  1412. if (err < 0) {
  1413. kfree(rx_sa);
  1414. rtnl_unlock();
  1415. return err;
  1416. }
  1417. if (tb_sa[MACSEC_SA_ATTR_PN]) {
  1418. spin_lock_bh(&rx_sa->lock);
  1419. rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
  1420. spin_unlock_bh(&rx_sa->lock);
  1421. }
  1422. if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
  1423. rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
  1424. nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
  1425. rx_sa->sc = rx_sc;
  1426. rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
  1427. rtnl_unlock();
  1428. return 0;
  1429. }
  1430. static bool validate_add_rxsc(struct nlattr **attrs)
  1431. {
  1432. if (!attrs[MACSEC_RXSC_ATTR_SCI])
  1433. return false;
  1434. if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
  1435. if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
  1436. return false;
  1437. }
  1438. return true;
  1439. }
  1440. static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
  1441. {
  1442. struct net_device *dev;
  1443. sci_t sci = MACSEC_UNDEF_SCI;
  1444. struct nlattr **attrs = info->attrs;
  1445. struct macsec_rx_sc *rx_sc;
  1446. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1447. if (!attrs[MACSEC_ATTR_IFINDEX])
  1448. return -EINVAL;
  1449. if (parse_rxsc_config(attrs, tb_rxsc))
  1450. return -EINVAL;
  1451. if (!validate_add_rxsc(tb_rxsc))
  1452. return -EINVAL;
  1453. rtnl_lock();
  1454. dev = get_dev_from_nl(genl_info_net(info), attrs);
  1455. if (IS_ERR(dev)) {
  1456. rtnl_unlock();
  1457. return PTR_ERR(dev);
  1458. }
  1459. sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
  1460. rx_sc = create_rx_sc(dev, sci);
  1461. if (IS_ERR(rx_sc)) {
  1462. rtnl_unlock();
  1463. return PTR_ERR(rx_sc);
  1464. }
  1465. if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
  1466. rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
  1467. rtnl_unlock();
  1468. return 0;
  1469. }
  1470. static bool validate_add_txsa(struct nlattr **attrs)
  1471. {
  1472. if (!attrs[MACSEC_SA_ATTR_AN] ||
  1473. !attrs[MACSEC_SA_ATTR_PN] ||
  1474. !attrs[MACSEC_SA_ATTR_KEY] ||
  1475. !attrs[MACSEC_SA_ATTR_KEYID])
  1476. return false;
  1477. if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
  1478. return false;
  1479. if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
  1480. return false;
  1481. if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
  1482. if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
  1483. return false;
  1484. }
  1485. if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
  1486. return false;
  1487. return true;
  1488. }
  1489. static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
  1490. {
  1491. struct net_device *dev;
  1492. struct nlattr **attrs = info->attrs;
  1493. struct macsec_secy *secy;
  1494. struct macsec_tx_sc *tx_sc;
  1495. struct macsec_tx_sa *tx_sa;
  1496. unsigned char assoc_num;
  1497. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1498. int err;
  1499. if (!attrs[MACSEC_ATTR_IFINDEX])
  1500. return -EINVAL;
  1501. if (parse_sa_config(attrs, tb_sa))
  1502. return -EINVAL;
  1503. if (!validate_add_txsa(tb_sa))
  1504. return -EINVAL;
  1505. rtnl_lock();
  1506. dev = get_dev_from_nl(genl_info_net(info), attrs);
  1507. if (IS_ERR(dev)) {
  1508. rtnl_unlock();
  1509. return PTR_ERR(dev);
  1510. }
  1511. secy = &macsec_priv(dev)->secy;
  1512. tx_sc = &secy->tx_sc;
  1513. assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
  1514. if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
  1515. pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
  1516. nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
  1517. rtnl_unlock();
  1518. return -EINVAL;
  1519. }
  1520. tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
  1521. if (tx_sa) {
  1522. rtnl_unlock();
  1523. return -EBUSY;
  1524. }
  1525. tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
  1526. if (!tx_sa) {
  1527. rtnl_unlock();
  1528. return -ENOMEM;
  1529. }
  1530. err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
  1531. secy->key_len, secy->icv_len);
  1532. if (err < 0) {
  1533. kfree(tx_sa);
  1534. rtnl_unlock();
  1535. return err;
  1536. }
  1537. nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
  1538. spin_lock_bh(&tx_sa->lock);
  1539. tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
  1540. spin_unlock_bh(&tx_sa->lock);
  1541. if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
  1542. tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
  1543. if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
  1544. secy->operational = true;
  1545. rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
  1546. rtnl_unlock();
  1547. return 0;
  1548. }
  1549. static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
  1550. {
  1551. struct nlattr **attrs = info->attrs;
  1552. struct net_device *dev;
  1553. struct macsec_secy *secy;
  1554. struct macsec_rx_sc *rx_sc;
  1555. struct macsec_rx_sa *rx_sa;
  1556. u8 assoc_num;
  1557. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1558. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1559. if (!attrs[MACSEC_ATTR_IFINDEX])
  1560. return -EINVAL;
  1561. if (parse_sa_config(attrs, tb_sa))
  1562. return -EINVAL;
  1563. if (parse_rxsc_config(attrs, tb_rxsc))
  1564. return -EINVAL;
  1565. rtnl_lock();
  1566. rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
  1567. &dev, &secy, &rx_sc, &assoc_num);
  1568. if (IS_ERR(rx_sa)) {
  1569. rtnl_unlock();
  1570. return PTR_ERR(rx_sa);
  1571. }
  1572. if (rx_sa->active) {
  1573. rtnl_unlock();
  1574. return -EBUSY;
  1575. }
  1576. RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
  1577. clear_rx_sa(rx_sa);
  1578. rtnl_unlock();
  1579. return 0;
  1580. }
  1581. static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
  1582. {
  1583. struct nlattr **attrs = info->attrs;
  1584. struct net_device *dev;
  1585. struct macsec_secy *secy;
  1586. struct macsec_rx_sc *rx_sc;
  1587. sci_t sci;
  1588. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1589. if (!attrs[MACSEC_ATTR_IFINDEX])
  1590. return -EINVAL;
  1591. if (parse_rxsc_config(attrs, tb_rxsc))
  1592. return -EINVAL;
  1593. if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
  1594. return -EINVAL;
  1595. rtnl_lock();
  1596. dev = get_dev_from_nl(genl_info_net(info), info->attrs);
  1597. if (IS_ERR(dev)) {
  1598. rtnl_unlock();
  1599. return PTR_ERR(dev);
  1600. }
  1601. secy = &macsec_priv(dev)->secy;
  1602. sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
  1603. rx_sc = del_rx_sc(secy, sci);
  1604. if (!rx_sc) {
  1605. rtnl_unlock();
  1606. return -ENODEV;
  1607. }
  1608. free_rx_sc(rx_sc);
  1609. rtnl_unlock();
  1610. return 0;
  1611. }
  1612. static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
  1613. {
  1614. struct nlattr **attrs = info->attrs;
  1615. struct net_device *dev;
  1616. struct macsec_secy *secy;
  1617. struct macsec_tx_sc *tx_sc;
  1618. struct macsec_tx_sa *tx_sa;
  1619. u8 assoc_num;
  1620. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1621. if (!attrs[MACSEC_ATTR_IFINDEX])
  1622. return -EINVAL;
  1623. if (parse_sa_config(attrs, tb_sa))
  1624. return -EINVAL;
  1625. rtnl_lock();
  1626. tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
  1627. &dev, &secy, &tx_sc, &assoc_num);
  1628. if (IS_ERR(tx_sa)) {
  1629. rtnl_unlock();
  1630. return PTR_ERR(tx_sa);
  1631. }
  1632. if (tx_sa->active) {
  1633. rtnl_unlock();
  1634. return -EBUSY;
  1635. }
  1636. RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
  1637. clear_tx_sa(tx_sa);
  1638. rtnl_unlock();
  1639. return 0;
  1640. }
  1641. static bool validate_upd_sa(struct nlattr **attrs)
  1642. {
  1643. if (!attrs[MACSEC_SA_ATTR_AN] ||
  1644. attrs[MACSEC_SA_ATTR_KEY] ||
  1645. attrs[MACSEC_SA_ATTR_KEYID])
  1646. return false;
  1647. if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
  1648. return false;
  1649. if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
  1650. return false;
  1651. if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
  1652. if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
  1653. return false;
  1654. }
  1655. return true;
  1656. }
  1657. static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
  1658. {
  1659. struct nlattr **attrs = info->attrs;
  1660. struct net_device *dev;
  1661. struct macsec_secy *secy;
  1662. struct macsec_tx_sc *tx_sc;
  1663. struct macsec_tx_sa *tx_sa;
  1664. u8 assoc_num;
  1665. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1666. if (!attrs[MACSEC_ATTR_IFINDEX])
  1667. return -EINVAL;
  1668. if (parse_sa_config(attrs, tb_sa))
  1669. return -EINVAL;
  1670. if (!validate_upd_sa(tb_sa))
  1671. return -EINVAL;
  1672. rtnl_lock();
  1673. tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
  1674. &dev, &secy, &tx_sc, &assoc_num);
  1675. if (IS_ERR(tx_sa)) {
  1676. rtnl_unlock();
  1677. return PTR_ERR(tx_sa);
  1678. }
  1679. if (tb_sa[MACSEC_SA_ATTR_PN]) {
  1680. spin_lock_bh(&tx_sa->lock);
  1681. tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
  1682. spin_unlock_bh(&tx_sa->lock);
  1683. }
  1684. if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
  1685. tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
  1686. if (assoc_num == tx_sc->encoding_sa)
  1687. secy->operational = tx_sa->active;
  1688. rtnl_unlock();
  1689. return 0;
  1690. }
  1691. static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
  1692. {
  1693. struct nlattr **attrs = info->attrs;
  1694. struct net_device *dev;
  1695. struct macsec_secy *secy;
  1696. struct macsec_rx_sc *rx_sc;
  1697. struct macsec_rx_sa *rx_sa;
  1698. u8 assoc_num;
  1699. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1700. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1701. if (!attrs[MACSEC_ATTR_IFINDEX])
  1702. return -EINVAL;
  1703. if (parse_rxsc_config(attrs, tb_rxsc))
  1704. return -EINVAL;
  1705. if (parse_sa_config(attrs, tb_sa))
  1706. return -EINVAL;
  1707. if (!validate_upd_sa(tb_sa))
  1708. return -EINVAL;
  1709. rtnl_lock();
  1710. rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
  1711. &dev, &secy, &rx_sc, &assoc_num);
  1712. if (IS_ERR(rx_sa)) {
  1713. rtnl_unlock();
  1714. return PTR_ERR(rx_sa);
  1715. }
  1716. if (tb_sa[MACSEC_SA_ATTR_PN]) {
  1717. spin_lock_bh(&rx_sa->lock);
  1718. rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
  1719. spin_unlock_bh(&rx_sa->lock);
  1720. }
  1721. if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
  1722. rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
  1723. rtnl_unlock();
  1724. return 0;
  1725. }
  1726. static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
  1727. {
  1728. struct nlattr **attrs = info->attrs;
  1729. struct net_device *dev;
  1730. struct macsec_secy *secy;
  1731. struct macsec_rx_sc *rx_sc;
  1732. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1733. if (!attrs[MACSEC_ATTR_IFINDEX])
  1734. return -EINVAL;
  1735. if (parse_rxsc_config(attrs, tb_rxsc))
  1736. return -EINVAL;
  1737. if (!validate_add_rxsc(tb_rxsc))
  1738. return -EINVAL;
  1739. rtnl_lock();
  1740. rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
  1741. if (IS_ERR(rx_sc)) {
  1742. rtnl_unlock();
  1743. return PTR_ERR(rx_sc);
  1744. }
  1745. if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
  1746. bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
  1747. if (rx_sc->active != new)
  1748. secy->n_rx_sc += new ? 1 : -1;
  1749. rx_sc->active = new;
  1750. }
  1751. rtnl_unlock();
  1752. return 0;
  1753. }
  1754. static int copy_tx_sa_stats(struct sk_buff *skb,
  1755. struct macsec_tx_sa_stats __percpu *pstats)
  1756. {
  1757. struct macsec_tx_sa_stats sum = {0, };
  1758. int cpu;
  1759. for_each_possible_cpu(cpu) {
  1760. const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
  1761. sum.OutPktsProtected += stats->OutPktsProtected;
  1762. sum.OutPktsEncrypted += stats->OutPktsEncrypted;
  1763. }
  1764. if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
  1765. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
  1766. return -EMSGSIZE;
  1767. return 0;
  1768. }
  1769. static int copy_rx_sa_stats(struct sk_buff *skb,
  1770. struct macsec_rx_sa_stats __percpu *pstats)
  1771. {
  1772. struct macsec_rx_sa_stats sum = {0, };
  1773. int cpu;
  1774. for_each_possible_cpu(cpu) {
  1775. const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
  1776. sum.InPktsOK += stats->InPktsOK;
  1777. sum.InPktsInvalid += stats->InPktsInvalid;
  1778. sum.InPktsNotValid += stats->InPktsNotValid;
  1779. sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
  1780. sum.InPktsUnusedSA += stats->InPktsUnusedSA;
  1781. }
  1782. if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
  1783. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
  1784. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
  1785. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
  1786. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
  1787. return -EMSGSIZE;
  1788. return 0;
  1789. }
  1790. static int copy_rx_sc_stats(struct sk_buff *skb,
  1791. struct pcpu_rx_sc_stats __percpu *pstats)
  1792. {
  1793. struct macsec_rx_sc_stats sum = {0, };
  1794. int cpu;
  1795. for_each_possible_cpu(cpu) {
  1796. const struct pcpu_rx_sc_stats *stats;
  1797. struct macsec_rx_sc_stats tmp;
  1798. unsigned int start;
  1799. stats = per_cpu_ptr(pstats, cpu);
  1800. do {
  1801. start = u64_stats_fetch_begin_irq(&stats->syncp);
  1802. memcpy(&tmp, &stats->stats, sizeof(tmp));
  1803. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  1804. sum.InOctetsValidated += tmp.InOctetsValidated;
  1805. sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
  1806. sum.InPktsUnchecked += tmp.InPktsUnchecked;
  1807. sum.InPktsDelayed += tmp.InPktsDelayed;
  1808. sum.InPktsOK += tmp.InPktsOK;
  1809. sum.InPktsInvalid += tmp.InPktsInvalid;
  1810. sum.InPktsLate += tmp.InPktsLate;
  1811. sum.InPktsNotValid += tmp.InPktsNotValid;
  1812. sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
  1813. sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
  1814. }
  1815. if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
  1816. sum.InOctetsValidated,
  1817. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1818. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
  1819. sum.InOctetsDecrypted,
  1820. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1821. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
  1822. sum.InPktsUnchecked,
  1823. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1824. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
  1825. sum.InPktsDelayed,
  1826. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1827. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
  1828. sum.InPktsOK,
  1829. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1830. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
  1831. sum.InPktsInvalid,
  1832. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1833. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
  1834. sum.InPktsLate,
  1835. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1836. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
  1837. sum.InPktsNotValid,
  1838. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1839. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
  1840. sum.InPktsNotUsingSA,
  1841. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1842. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
  1843. sum.InPktsUnusedSA,
  1844. MACSEC_RXSC_STATS_ATTR_PAD))
  1845. return -EMSGSIZE;
  1846. return 0;
  1847. }
  1848. static int copy_tx_sc_stats(struct sk_buff *skb,
  1849. struct pcpu_tx_sc_stats __percpu *pstats)
  1850. {
  1851. struct macsec_tx_sc_stats sum = {0, };
  1852. int cpu;
  1853. for_each_possible_cpu(cpu) {
  1854. const struct pcpu_tx_sc_stats *stats;
  1855. struct macsec_tx_sc_stats tmp;
  1856. unsigned int start;
  1857. stats = per_cpu_ptr(pstats, cpu);
  1858. do {
  1859. start = u64_stats_fetch_begin_irq(&stats->syncp);
  1860. memcpy(&tmp, &stats->stats, sizeof(tmp));
  1861. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  1862. sum.OutPktsProtected += tmp.OutPktsProtected;
  1863. sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
  1864. sum.OutOctetsProtected += tmp.OutOctetsProtected;
  1865. sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
  1866. }
  1867. if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
  1868. sum.OutPktsProtected,
  1869. MACSEC_TXSC_STATS_ATTR_PAD) ||
  1870. nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
  1871. sum.OutPktsEncrypted,
  1872. MACSEC_TXSC_STATS_ATTR_PAD) ||
  1873. nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
  1874. sum.OutOctetsProtected,
  1875. MACSEC_TXSC_STATS_ATTR_PAD) ||
  1876. nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
  1877. sum.OutOctetsEncrypted,
  1878. MACSEC_TXSC_STATS_ATTR_PAD))
  1879. return -EMSGSIZE;
  1880. return 0;
  1881. }
  1882. static int copy_secy_stats(struct sk_buff *skb,
  1883. struct pcpu_secy_stats __percpu *pstats)
  1884. {
  1885. struct macsec_dev_stats sum = {0, };
  1886. int cpu;
  1887. for_each_possible_cpu(cpu) {
  1888. const struct pcpu_secy_stats *stats;
  1889. struct macsec_dev_stats tmp;
  1890. unsigned int start;
  1891. stats = per_cpu_ptr(pstats, cpu);
  1892. do {
  1893. start = u64_stats_fetch_begin_irq(&stats->syncp);
  1894. memcpy(&tmp, &stats->stats, sizeof(tmp));
  1895. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  1896. sum.OutPktsUntagged += tmp.OutPktsUntagged;
  1897. sum.InPktsUntagged += tmp.InPktsUntagged;
  1898. sum.OutPktsTooLong += tmp.OutPktsTooLong;
  1899. sum.InPktsNoTag += tmp.InPktsNoTag;
  1900. sum.InPktsBadTag += tmp.InPktsBadTag;
  1901. sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
  1902. sum.InPktsNoSCI += tmp.InPktsNoSCI;
  1903. sum.InPktsOverrun += tmp.InPktsOverrun;
  1904. }
  1905. if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
  1906. sum.OutPktsUntagged,
  1907. MACSEC_SECY_STATS_ATTR_PAD) ||
  1908. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
  1909. sum.InPktsUntagged,
  1910. MACSEC_SECY_STATS_ATTR_PAD) ||
  1911. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
  1912. sum.OutPktsTooLong,
  1913. MACSEC_SECY_STATS_ATTR_PAD) ||
  1914. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
  1915. sum.InPktsNoTag,
  1916. MACSEC_SECY_STATS_ATTR_PAD) ||
  1917. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
  1918. sum.InPktsBadTag,
  1919. MACSEC_SECY_STATS_ATTR_PAD) ||
  1920. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
  1921. sum.InPktsUnknownSCI,
  1922. MACSEC_SECY_STATS_ATTR_PAD) ||
  1923. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
  1924. sum.InPktsNoSCI,
  1925. MACSEC_SECY_STATS_ATTR_PAD) ||
  1926. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
  1927. sum.InPktsOverrun,
  1928. MACSEC_SECY_STATS_ATTR_PAD))
  1929. return -EMSGSIZE;
  1930. return 0;
  1931. }
  1932. static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
  1933. {
  1934. struct macsec_tx_sc *tx_sc = &secy->tx_sc;
  1935. struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
  1936. u64 csid;
  1937. if (!secy_nest)
  1938. return 1;
  1939. switch (secy->key_len) {
  1940. case MACSEC_GCM_AES_128_SAK_LEN:
  1941. csid = MACSEC_DEFAULT_CIPHER_ID;
  1942. break;
  1943. case MACSEC_GCM_AES_256_SAK_LEN:
  1944. csid = MACSEC_CIPHER_ID_GCM_AES_256;
  1945. break;
  1946. default:
  1947. goto cancel;
  1948. }
  1949. if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
  1950. MACSEC_SECY_ATTR_PAD) ||
  1951. nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
  1952. csid, MACSEC_SECY_ATTR_PAD) ||
  1953. nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
  1954. nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
  1955. nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
  1956. nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
  1957. nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
  1958. nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
  1959. nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
  1960. nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
  1961. nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
  1962. nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
  1963. goto cancel;
  1964. if (secy->replay_protect) {
  1965. if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
  1966. goto cancel;
  1967. }
  1968. nla_nest_end(skb, secy_nest);
  1969. return 0;
  1970. cancel:
  1971. nla_nest_cancel(skb, secy_nest);
  1972. return 1;
  1973. }
  1974. static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
  1975. struct sk_buff *skb, struct netlink_callback *cb)
  1976. {
  1977. struct macsec_rx_sc *rx_sc;
  1978. struct macsec_tx_sc *tx_sc = &secy->tx_sc;
  1979. struct nlattr *txsa_list, *rxsc_list;
  1980. int i, j;
  1981. void *hdr;
  1982. struct nlattr *attr;
  1983. hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  1984. &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
  1985. if (!hdr)
  1986. return -EMSGSIZE;
  1987. genl_dump_check_consistent(cb, hdr);
  1988. if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
  1989. goto nla_put_failure;
  1990. if (nla_put_secy(secy, skb))
  1991. goto nla_put_failure;
  1992. attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
  1993. if (!attr)
  1994. goto nla_put_failure;
  1995. if (copy_tx_sc_stats(skb, tx_sc->stats)) {
  1996. nla_nest_cancel(skb, attr);
  1997. goto nla_put_failure;
  1998. }
  1999. nla_nest_end(skb, attr);
  2000. attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
  2001. if (!attr)
  2002. goto nla_put_failure;
  2003. if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
  2004. nla_nest_cancel(skb, attr);
  2005. goto nla_put_failure;
  2006. }
  2007. nla_nest_end(skb, attr);
  2008. txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
  2009. if (!txsa_list)
  2010. goto nla_put_failure;
  2011. for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
  2012. struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
  2013. struct nlattr *txsa_nest;
  2014. if (!tx_sa)
  2015. continue;
  2016. txsa_nest = nla_nest_start(skb, j++);
  2017. if (!txsa_nest) {
  2018. nla_nest_cancel(skb, txsa_list);
  2019. goto nla_put_failure;
  2020. }
  2021. if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
  2022. nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
  2023. nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
  2024. nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
  2025. nla_nest_cancel(skb, txsa_nest);
  2026. nla_nest_cancel(skb, txsa_list);
  2027. goto nla_put_failure;
  2028. }
  2029. attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
  2030. if (!attr) {
  2031. nla_nest_cancel(skb, txsa_nest);
  2032. nla_nest_cancel(skb, txsa_list);
  2033. goto nla_put_failure;
  2034. }
  2035. if (copy_tx_sa_stats(skb, tx_sa->stats)) {
  2036. nla_nest_cancel(skb, attr);
  2037. nla_nest_cancel(skb, txsa_nest);
  2038. nla_nest_cancel(skb, txsa_list);
  2039. goto nla_put_failure;
  2040. }
  2041. nla_nest_end(skb, attr);
  2042. nla_nest_end(skb, txsa_nest);
  2043. }
  2044. nla_nest_end(skb, txsa_list);
  2045. rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
  2046. if (!rxsc_list)
  2047. goto nla_put_failure;
  2048. j = 1;
  2049. for_each_rxsc_rtnl(secy, rx_sc) {
  2050. int k;
  2051. struct nlattr *rxsa_list;
  2052. struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
  2053. if (!rxsc_nest) {
  2054. nla_nest_cancel(skb, rxsc_list);
  2055. goto nla_put_failure;
  2056. }
  2057. if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
  2058. nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
  2059. MACSEC_RXSC_ATTR_PAD)) {
  2060. nla_nest_cancel(skb, rxsc_nest);
  2061. nla_nest_cancel(skb, rxsc_list);
  2062. goto nla_put_failure;
  2063. }
  2064. attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
  2065. if (!attr) {
  2066. nla_nest_cancel(skb, rxsc_nest);
  2067. nla_nest_cancel(skb, rxsc_list);
  2068. goto nla_put_failure;
  2069. }
  2070. if (copy_rx_sc_stats(skb, rx_sc->stats)) {
  2071. nla_nest_cancel(skb, attr);
  2072. nla_nest_cancel(skb, rxsc_nest);
  2073. nla_nest_cancel(skb, rxsc_list);
  2074. goto nla_put_failure;
  2075. }
  2076. nla_nest_end(skb, attr);
  2077. rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
  2078. if (!rxsa_list) {
  2079. nla_nest_cancel(skb, rxsc_nest);
  2080. nla_nest_cancel(skb, rxsc_list);
  2081. goto nla_put_failure;
  2082. }
  2083. for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
  2084. struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
  2085. struct nlattr *rxsa_nest;
  2086. if (!rx_sa)
  2087. continue;
  2088. rxsa_nest = nla_nest_start(skb, k++);
  2089. if (!rxsa_nest) {
  2090. nla_nest_cancel(skb, rxsa_list);
  2091. nla_nest_cancel(skb, rxsc_nest);
  2092. nla_nest_cancel(skb, rxsc_list);
  2093. goto nla_put_failure;
  2094. }
  2095. attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
  2096. if (!attr) {
  2097. nla_nest_cancel(skb, rxsa_list);
  2098. nla_nest_cancel(skb, rxsc_nest);
  2099. nla_nest_cancel(skb, rxsc_list);
  2100. goto nla_put_failure;
  2101. }
  2102. if (copy_rx_sa_stats(skb, rx_sa->stats)) {
  2103. nla_nest_cancel(skb, attr);
  2104. nla_nest_cancel(skb, rxsa_list);
  2105. nla_nest_cancel(skb, rxsc_nest);
  2106. nla_nest_cancel(skb, rxsc_list);
  2107. goto nla_put_failure;
  2108. }
  2109. nla_nest_end(skb, attr);
  2110. if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
  2111. nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
  2112. nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
  2113. nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
  2114. nla_nest_cancel(skb, rxsa_nest);
  2115. nla_nest_cancel(skb, rxsc_nest);
  2116. nla_nest_cancel(skb, rxsc_list);
  2117. goto nla_put_failure;
  2118. }
  2119. nla_nest_end(skb, rxsa_nest);
  2120. }
  2121. nla_nest_end(skb, rxsa_list);
  2122. nla_nest_end(skb, rxsc_nest);
  2123. }
  2124. nla_nest_end(skb, rxsc_list);
  2125. genlmsg_end(skb, hdr);
  2126. return 0;
  2127. nla_put_failure:
  2128. genlmsg_cancel(skb, hdr);
  2129. return -EMSGSIZE;
  2130. }
  2131. static int macsec_generation = 1; /* protected by RTNL */
  2132. static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
  2133. {
  2134. struct net *net = sock_net(skb->sk);
  2135. struct net_device *dev;
  2136. int dev_idx, d;
  2137. dev_idx = cb->args[0];
  2138. d = 0;
  2139. rtnl_lock();
  2140. cb->seq = macsec_generation;
  2141. for_each_netdev(net, dev) {
  2142. struct macsec_secy *secy;
  2143. if (d < dev_idx)
  2144. goto next;
  2145. if (!netif_is_macsec(dev))
  2146. goto next;
  2147. secy = &macsec_priv(dev)->secy;
  2148. if (dump_secy(secy, dev, skb, cb) < 0)
  2149. goto done;
  2150. next:
  2151. d++;
  2152. }
  2153. done:
  2154. rtnl_unlock();
  2155. cb->args[0] = d;
  2156. return skb->len;
  2157. }
  2158. static const struct genl_ops macsec_genl_ops[] = {
  2159. {
  2160. .cmd = MACSEC_CMD_GET_TXSC,
  2161. .dumpit = macsec_dump_txsc,
  2162. .policy = macsec_genl_policy,
  2163. },
  2164. {
  2165. .cmd = MACSEC_CMD_ADD_RXSC,
  2166. .doit = macsec_add_rxsc,
  2167. .policy = macsec_genl_policy,
  2168. .flags = GENL_ADMIN_PERM,
  2169. },
  2170. {
  2171. .cmd = MACSEC_CMD_DEL_RXSC,
  2172. .doit = macsec_del_rxsc,
  2173. .policy = macsec_genl_policy,
  2174. .flags = GENL_ADMIN_PERM,
  2175. },
  2176. {
  2177. .cmd = MACSEC_CMD_UPD_RXSC,
  2178. .doit = macsec_upd_rxsc,
  2179. .policy = macsec_genl_policy,
  2180. .flags = GENL_ADMIN_PERM,
  2181. },
  2182. {
  2183. .cmd = MACSEC_CMD_ADD_TXSA,
  2184. .doit = macsec_add_txsa,
  2185. .policy = macsec_genl_policy,
  2186. .flags = GENL_ADMIN_PERM,
  2187. },
  2188. {
  2189. .cmd = MACSEC_CMD_DEL_TXSA,
  2190. .doit = macsec_del_txsa,
  2191. .policy = macsec_genl_policy,
  2192. .flags = GENL_ADMIN_PERM,
  2193. },
  2194. {
  2195. .cmd = MACSEC_CMD_UPD_TXSA,
  2196. .doit = macsec_upd_txsa,
  2197. .policy = macsec_genl_policy,
  2198. .flags = GENL_ADMIN_PERM,
  2199. },
  2200. {
  2201. .cmd = MACSEC_CMD_ADD_RXSA,
  2202. .doit = macsec_add_rxsa,
  2203. .policy = macsec_genl_policy,
  2204. .flags = GENL_ADMIN_PERM,
  2205. },
  2206. {
  2207. .cmd = MACSEC_CMD_DEL_RXSA,
  2208. .doit = macsec_del_rxsa,
  2209. .policy = macsec_genl_policy,
  2210. .flags = GENL_ADMIN_PERM,
  2211. },
  2212. {
  2213. .cmd = MACSEC_CMD_UPD_RXSA,
  2214. .doit = macsec_upd_rxsa,
  2215. .policy = macsec_genl_policy,
  2216. .flags = GENL_ADMIN_PERM,
  2217. },
  2218. };
  2219. static struct genl_family macsec_fam __ro_after_init = {
  2220. .name = MACSEC_GENL_NAME,
  2221. .hdrsize = 0,
  2222. .version = MACSEC_GENL_VERSION,
  2223. .maxattr = MACSEC_ATTR_MAX,
  2224. .netnsok = true,
  2225. .module = THIS_MODULE,
  2226. .ops = macsec_genl_ops,
  2227. .n_ops = ARRAY_SIZE(macsec_genl_ops),
  2228. };
  2229. static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
  2230. struct net_device *dev)
  2231. {
  2232. struct macsec_dev *macsec = netdev_priv(dev);
  2233. struct macsec_secy *secy = &macsec->secy;
  2234. struct pcpu_secy_stats *secy_stats;
  2235. int ret, len;
  2236. /* 10.5 */
  2237. if (!secy->protect_frames) {
  2238. secy_stats = this_cpu_ptr(macsec->stats);
  2239. u64_stats_update_begin(&secy_stats->syncp);
  2240. secy_stats->stats.OutPktsUntagged++;
  2241. u64_stats_update_end(&secy_stats->syncp);
  2242. skb->dev = macsec->real_dev;
  2243. len = skb->len;
  2244. ret = dev_queue_xmit(skb);
  2245. count_tx(dev, ret, len);
  2246. return ret;
  2247. }
  2248. if (!secy->operational) {
  2249. kfree_skb(skb);
  2250. dev->stats.tx_dropped++;
  2251. return NETDEV_TX_OK;
  2252. }
  2253. skb = macsec_encrypt(skb, dev);
  2254. if (IS_ERR(skb)) {
  2255. if (PTR_ERR(skb) != -EINPROGRESS)
  2256. dev->stats.tx_dropped++;
  2257. return NETDEV_TX_OK;
  2258. }
  2259. macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
  2260. macsec_encrypt_finish(skb, dev);
  2261. len = skb->len;
  2262. ret = dev_queue_xmit(skb);
  2263. count_tx(dev, ret, len);
  2264. return ret;
  2265. }
  2266. #define MACSEC_FEATURES \
  2267. (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
  2268. static struct lock_class_key macsec_netdev_addr_lock_key;
  2269. static int macsec_dev_init(struct net_device *dev)
  2270. {
  2271. struct macsec_dev *macsec = macsec_priv(dev);
  2272. struct net_device *real_dev = macsec->real_dev;
  2273. int err;
  2274. dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
  2275. if (!dev->tstats)
  2276. return -ENOMEM;
  2277. err = gro_cells_init(&macsec->gro_cells, dev);
  2278. if (err) {
  2279. free_percpu(dev->tstats);
  2280. return err;
  2281. }
  2282. dev->features = real_dev->features & MACSEC_FEATURES;
  2283. dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
  2284. dev->needed_headroom = real_dev->needed_headroom +
  2285. MACSEC_NEEDED_HEADROOM;
  2286. dev->needed_tailroom = real_dev->needed_tailroom +
  2287. MACSEC_NEEDED_TAILROOM;
  2288. if (is_zero_ether_addr(dev->dev_addr))
  2289. eth_hw_addr_inherit(dev, real_dev);
  2290. if (is_zero_ether_addr(dev->broadcast))
  2291. memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
  2292. return 0;
  2293. }
  2294. static void macsec_dev_uninit(struct net_device *dev)
  2295. {
  2296. struct macsec_dev *macsec = macsec_priv(dev);
  2297. gro_cells_destroy(&macsec->gro_cells);
  2298. free_percpu(dev->tstats);
  2299. }
  2300. static netdev_features_t macsec_fix_features(struct net_device *dev,
  2301. netdev_features_t features)
  2302. {
  2303. struct macsec_dev *macsec = macsec_priv(dev);
  2304. struct net_device *real_dev = macsec->real_dev;
  2305. features &= (real_dev->features & MACSEC_FEATURES) |
  2306. NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
  2307. features |= NETIF_F_LLTX;
  2308. return features;
  2309. }
  2310. static int macsec_dev_open(struct net_device *dev)
  2311. {
  2312. struct macsec_dev *macsec = macsec_priv(dev);
  2313. struct net_device *real_dev = macsec->real_dev;
  2314. int err;
  2315. if (!(real_dev->flags & IFF_UP))
  2316. return -ENETDOWN;
  2317. err = dev_uc_add(real_dev, dev->dev_addr);
  2318. if (err < 0)
  2319. return err;
  2320. if (dev->flags & IFF_ALLMULTI) {
  2321. err = dev_set_allmulti(real_dev, 1);
  2322. if (err < 0)
  2323. goto del_unicast;
  2324. }
  2325. if (dev->flags & IFF_PROMISC) {
  2326. err = dev_set_promiscuity(real_dev, 1);
  2327. if (err < 0)
  2328. goto clear_allmulti;
  2329. }
  2330. if (netif_carrier_ok(real_dev))
  2331. netif_carrier_on(dev);
  2332. return 0;
  2333. clear_allmulti:
  2334. if (dev->flags & IFF_ALLMULTI)
  2335. dev_set_allmulti(real_dev, -1);
  2336. del_unicast:
  2337. dev_uc_del(real_dev, dev->dev_addr);
  2338. netif_carrier_off(dev);
  2339. return err;
  2340. }
  2341. static int macsec_dev_stop(struct net_device *dev)
  2342. {
  2343. struct macsec_dev *macsec = macsec_priv(dev);
  2344. struct net_device *real_dev = macsec->real_dev;
  2345. netif_carrier_off(dev);
  2346. dev_mc_unsync(real_dev, dev);
  2347. dev_uc_unsync(real_dev, dev);
  2348. if (dev->flags & IFF_ALLMULTI)
  2349. dev_set_allmulti(real_dev, -1);
  2350. if (dev->flags & IFF_PROMISC)
  2351. dev_set_promiscuity(real_dev, -1);
  2352. dev_uc_del(real_dev, dev->dev_addr);
  2353. return 0;
  2354. }
  2355. static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
  2356. {
  2357. struct net_device *real_dev = macsec_priv(dev)->real_dev;
  2358. if (!(dev->flags & IFF_UP))
  2359. return;
  2360. if (change & IFF_ALLMULTI)
  2361. dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
  2362. if (change & IFF_PROMISC)
  2363. dev_set_promiscuity(real_dev,
  2364. dev->flags & IFF_PROMISC ? 1 : -1);
  2365. }
  2366. static void macsec_dev_set_rx_mode(struct net_device *dev)
  2367. {
  2368. struct net_device *real_dev = macsec_priv(dev)->real_dev;
  2369. dev_mc_sync(real_dev, dev);
  2370. dev_uc_sync(real_dev, dev);
  2371. }
  2372. static int macsec_set_mac_address(struct net_device *dev, void *p)
  2373. {
  2374. struct macsec_dev *macsec = macsec_priv(dev);
  2375. struct net_device *real_dev = macsec->real_dev;
  2376. struct sockaddr *addr = p;
  2377. int err;
  2378. if (!is_valid_ether_addr(addr->sa_data))
  2379. return -EADDRNOTAVAIL;
  2380. if (!(dev->flags & IFF_UP))
  2381. goto out;
  2382. err = dev_uc_add(real_dev, addr->sa_data);
  2383. if (err < 0)
  2384. return err;
  2385. dev_uc_del(real_dev, dev->dev_addr);
  2386. out:
  2387. ether_addr_copy(dev->dev_addr, addr->sa_data);
  2388. return 0;
  2389. }
  2390. static int macsec_change_mtu(struct net_device *dev, int new_mtu)
  2391. {
  2392. struct macsec_dev *macsec = macsec_priv(dev);
  2393. unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
  2394. if (macsec->real_dev->mtu - extra < new_mtu)
  2395. return -ERANGE;
  2396. dev->mtu = new_mtu;
  2397. return 0;
  2398. }
  2399. static void macsec_get_stats64(struct net_device *dev,
  2400. struct rtnl_link_stats64 *s)
  2401. {
  2402. int cpu;
  2403. if (!dev->tstats)
  2404. return;
  2405. for_each_possible_cpu(cpu) {
  2406. struct pcpu_sw_netstats *stats;
  2407. struct pcpu_sw_netstats tmp;
  2408. int start;
  2409. stats = per_cpu_ptr(dev->tstats, cpu);
  2410. do {
  2411. start = u64_stats_fetch_begin_irq(&stats->syncp);
  2412. tmp.rx_packets = stats->rx_packets;
  2413. tmp.rx_bytes = stats->rx_bytes;
  2414. tmp.tx_packets = stats->tx_packets;
  2415. tmp.tx_bytes = stats->tx_bytes;
  2416. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  2417. s->rx_packets += tmp.rx_packets;
  2418. s->rx_bytes += tmp.rx_bytes;
  2419. s->tx_packets += tmp.tx_packets;
  2420. s->tx_bytes += tmp.tx_bytes;
  2421. }
  2422. s->rx_dropped = dev->stats.rx_dropped;
  2423. s->tx_dropped = dev->stats.tx_dropped;
  2424. }
  2425. static int macsec_get_iflink(const struct net_device *dev)
  2426. {
  2427. return macsec_priv(dev)->real_dev->ifindex;
  2428. }
  2429. static int macsec_get_nest_level(struct net_device *dev)
  2430. {
  2431. return macsec_priv(dev)->nest_level;
  2432. }
  2433. static const struct net_device_ops macsec_netdev_ops = {
  2434. .ndo_init = macsec_dev_init,
  2435. .ndo_uninit = macsec_dev_uninit,
  2436. .ndo_open = macsec_dev_open,
  2437. .ndo_stop = macsec_dev_stop,
  2438. .ndo_fix_features = macsec_fix_features,
  2439. .ndo_change_mtu = macsec_change_mtu,
  2440. .ndo_set_rx_mode = macsec_dev_set_rx_mode,
  2441. .ndo_change_rx_flags = macsec_dev_change_rx_flags,
  2442. .ndo_set_mac_address = macsec_set_mac_address,
  2443. .ndo_start_xmit = macsec_start_xmit,
  2444. .ndo_get_stats64 = macsec_get_stats64,
  2445. .ndo_get_iflink = macsec_get_iflink,
  2446. .ndo_get_lock_subclass = macsec_get_nest_level,
  2447. };
  2448. static const struct device_type macsec_type = {
  2449. .name = "macsec",
  2450. };
  2451. static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
  2452. [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
  2453. [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
  2454. [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
  2455. [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
  2456. [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
  2457. [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
  2458. [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
  2459. [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
  2460. [IFLA_MACSEC_ES] = { .type = NLA_U8 },
  2461. [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
  2462. [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
  2463. [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
  2464. };
  2465. static void macsec_free_netdev(struct net_device *dev)
  2466. {
  2467. struct macsec_dev *macsec = macsec_priv(dev);
  2468. struct net_device *real_dev = macsec->real_dev;
  2469. free_percpu(macsec->stats);
  2470. free_percpu(macsec->secy.tx_sc.stats);
  2471. dev_put(real_dev);
  2472. }
  2473. static void macsec_setup(struct net_device *dev)
  2474. {
  2475. ether_setup(dev);
  2476. dev->min_mtu = 0;
  2477. dev->max_mtu = ETH_MAX_MTU;
  2478. dev->priv_flags |= IFF_NO_QUEUE;
  2479. dev->netdev_ops = &macsec_netdev_ops;
  2480. dev->needs_free_netdev = true;
  2481. dev->priv_destructor = macsec_free_netdev;
  2482. SET_NETDEV_DEVTYPE(dev, &macsec_type);
  2483. eth_zero_addr(dev->broadcast);
  2484. }
  2485. static int macsec_changelink_common(struct net_device *dev,
  2486. struct nlattr *data[])
  2487. {
  2488. struct macsec_secy *secy;
  2489. struct macsec_tx_sc *tx_sc;
  2490. secy = &macsec_priv(dev)->secy;
  2491. tx_sc = &secy->tx_sc;
  2492. if (data[IFLA_MACSEC_ENCODING_SA]) {
  2493. struct macsec_tx_sa *tx_sa;
  2494. tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
  2495. tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
  2496. secy->operational = tx_sa && tx_sa->active;
  2497. }
  2498. if (data[IFLA_MACSEC_WINDOW])
  2499. secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
  2500. if (data[IFLA_MACSEC_ENCRYPT])
  2501. tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
  2502. if (data[IFLA_MACSEC_PROTECT])
  2503. secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
  2504. if (data[IFLA_MACSEC_INC_SCI])
  2505. tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
  2506. if (data[IFLA_MACSEC_ES])
  2507. tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
  2508. if (data[IFLA_MACSEC_SCB])
  2509. tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
  2510. if (data[IFLA_MACSEC_REPLAY_PROTECT])
  2511. secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
  2512. if (data[IFLA_MACSEC_VALIDATION])
  2513. secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
  2514. if (data[IFLA_MACSEC_CIPHER_SUITE]) {
  2515. switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
  2516. case MACSEC_CIPHER_ID_GCM_AES_128:
  2517. case MACSEC_DEFAULT_CIPHER_ID:
  2518. secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
  2519. break;
  2520. case MACSEC_CIPHER_ID_GCM_AES_256:
  2521. secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
  2522. break;
  2523. default:
  2524. return -EINVAL;
  2525. }
  2526. }
  2527. return 0;
  2528. }
  2529. static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
  2530. struct nlattr *data[],
  2531. struct netlink_ext_ack *extack)
  2532. {
  2533. if (!data)
  2534. return 0;
  2535. if (data[IFLA_MACSEC_CIPHER_SUITE] ||
  2536. data[IFLA_MACSEC_ICV_LEN] ||
  2537. data[IFLA_MACSEC_SCI] ||
  2538. data[IFLA_MACSEC_PORT])
  2539. return -EINVAL;
  2540. return macsec_changelink_common(dev, data);
  2541. }
  2542. static void macsec_del_dev(struct macsec_dev *macsec)
  2543. {
  2544. int i;
  2545. while (macsec->secy.rx_sc) {
  2546. struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
  2547. rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
  2548. free_rx_sc(rx_sc);
  2549. }
  2550. for (i = 0; i < MACSEC_NUM_AN; i++) {
  2551. struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
  2552. if (sa) {
  2553. RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
  2554. clear_tx_sa(sa);
  2555. }
  2556. }
  2557. }
  2558. static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
  2559. {
  2560. struct macsec_dev *macsec = macsec_priv(dev);
  2561. struct net_device *real_dev = macsec->real_dev;
  2562. unregister_netdevice_queue(dev, head);
  2563. list_del_rcu(&macsec->secys);
  2564. macsec_del_dev(macsec);
  2565. netdev_upper_dev_unlink(real_dev, dev);
  2566. macsec_generation++;
  2567. }
  2568. static void macsec_dellink(struct net_device *dev, struct list_head *head)
  2569. {
  2570. struct macsec_dev *macsec = macsec_priv(dev);
  2571. struct net_device *real_dev = macsec->real_dev;
  2572. struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
  2573. macsec_common_dellink(dev, head);
  2574. if (list_empty(&rxd->secys)) {
  2575. netdev_rx_handler_unregister(real_dev);
  2576. kfree(rxd);
  2577. }
  2578. }
  2579. static int register_macsec_dev(struct net_device *real_dev,
  2580. struct net_device *dev)
  2581. {
  2582. struct macsec_dev *macsec = macsec_priv(dev);
  2583. struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
  2584. if (!rxd) {
  2585. int err;
  2586. rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
  2587. if (!rxd)
  2588. return -ENOMEM;
  2589. INIT_LIST_HEAD(&rxd->secys);
  2590. err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
  2591. rxd);
  2592. if (err < 0) {
  2593. kfree(rxd);
  2594. return err;
  2595. }
  2596. }
  2597. list_add_tail_rcu(&macsec->secys, &rxd->secys);
  2598. return 0;
  2599. }
  2600. static bool sci_exists(struct net_device *dev, sci_t sci)
  2601. {
  2602. struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
  2603. struct macsec_dev *macsec;
  2604. list_for_each_entry(macsec, &rxd->secys, secys) {
  2605. if (macsec->secy.sci == sci)
  2606. return true;
  2607. }
  2608. return false;
  2609. }
  2610. static sci_t dev_to_sci(struct net_device *dev, __be16 port)
  2611. {
  2612. return make_sci(dev->dev_addr, port);
  2613. }
  2614. static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
  2615. {
  2616. struct macsec_dev *macsec = macsec_priv(dev);
  2617. struct macsec_secy *secy = &macsec->secy;
  2618. macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
  2619. if (!macsec->stats)
  2620. return -ENOMEM;
  2621. secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
  2622. if (!secy->tx_sc.stats) {
  2623. free_percpu(macsec->stats);
  2624. return -ENOMEM;
  2625. }
  2626. if (sci == MACSEC_UNDEF_SCI)
  2627. sci = dev_to_sci(dev, MACSEC_PORT_ES);
  2628. secy->netdev = dev;
  2629. secy->operational = true;
  2630. secy->key_len = DEFAULT_SAK_LEN;
  2631. secy->icv_len = icv_len;
  2632. secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
  2633. secy->protect_frames = true;
  2634. secy->replay_protect = false;
  2635. secy->sci = sci;
  2636. secy->tx_sc.active = true;
  2637. secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
  2638. secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
  2639. secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
  2640. secy->tx_sc.end_station = false;
  2641. secy->tx_sc.scb = false;
  2642. return 0;
  2643. }
  2644. static int macsec_newlink(struct net *net, struct net_device *dev,
  2645. struct nlattr *tb[], struct nlattr *data[],
  2646. struct netlink_ext_ack *extack)
  2647. {
  2648. struct macsec_dev *macsec = macsec_priv(dev);
  2649. struct net_device *real_dev;
  2650. int err;
  2651. sci_t sci;
  2652. u8 icv_len = DEFAULT_ICV_LEN;
  2653. rx_handler_func_t *rx_handler;
  2654. if (!tb[IFLA_LINK])
  2655. return -EINVAL;
  2656. real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
  2657. if (!real_dev)
  2658. return -ENODEV;
  2659. dev->priv_flags |= IFF_MACSEC;
  2660. macsec->real_dev = real_dev;
  2661. if (data && data[IFLA_MACSEC_ICV_LEN])
  2662. icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
  2663. dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
  2664. rx_handler = rtnl_dereference(real_dev->rx_handler);
  2665. if (rx_handler && rx_handler != macsec_handle_frame)
  2666. return -EBUSY;
  2667. err = register_netdevice(dev);
  2668. if (err < 0)
  2669. return err;
  2670. dev_hold(real_dev);
  2671. macsec->nest_level = dev_get_nest_level(real_dev) + 1;
  2672. netdev_lockdep_set_classes(dev);
  2673. lockdep_set_class_and_subclass(&dev->addr_list_lock,
  2674. &macsec_netdev_addr_lock_key,
  2675. macsec_get_nest_level(dev));
  2676. err = netdev_upper_dev_link(real_dev, dev, extack);
  2677. if (err < 0)
  2678. goto unregister;
  2679. /* need to be already registered so that ->init has run and
  2680. * the MAC addr is set
  2681. */
  2682. if (data && data[IFLA_MACSEC_SCI])
  2683. sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
  2684. else if (data && data[IFLA_MACSEC_PORT])
  2685. sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
  2686. else
  2687. sci = dev_to_sci(dev, MACSEC_PORT_ES);
  2688. if (rx_handler && sci_exists(real_dev, sci)) {
  2689. err = -EBUSY;
  2690. goto unlink;
  2691. }
  2692. err = macsec_add_dev(dev, sci, icv_len);
  2693. if (err)
  2694. goto unlink;
  2695. if (data) {
  2696. err = macsec_changelink_common(dev, data);
  2697. if (err)
  2698. goto del_dev;
  2699. }
  2700. err = register_macsec_dev(real_dev, dev);
  2701. if (err < 0)
  2702. goto del_dev;
  2703. macsec_generation++;
  2704. return 0;
  2705. del_dev:
  2706. macsec_del_dev(macsec);
  2707. unlink:
  2708. netdev_upper_dev_unlink(real_dev, dev);
  2709. unregister:
  2710. unregister_netdevice(dev);
  2711. return err;
  2712. }
  2713. static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
  2714. struct netlink_ext_ack *extack)
  2715. {
  2716. u64 csid = MACSEC_DEFAULT_CIPHER_ID;
  2717. u8 icv_len = DEFAULT_ICV_LEN;
  2718. int flag;
  2719. bool es, scb, sci;
  2720. if (!data)
  2721. return 0;
  2722. if (data[IFLA_MACSEC_CIPHER_SUITE])
  2723. csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
  2724. if (data[IFLA_MACSEC_ICV_LEN]) {
  2725. icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
  2726. if (icv_len != DEFAULT_ICV_LEN) {
  2727. char dummy_key[DEFAULT_SAK_LEN] = { 0 };
  2728. struct crypto_aead *dummy_tfm;
  2729. dummy_tfm = macsec_alloc_tfm(dummy_key,
  2730. DEFAULT_SAK_LEN,
  2731. icv_len);
  2732. if (IS_ERR(dummy_tfm))
  2733. return PTR_ERR(dummy_tfm);
  2734. crypto_free_aead(dummy_tfm);
  2735. }
  2736. }
  2737. switch (csid) {
  2738. case MACSEC_CIPHER_ID_GCM_AES_128:
  2739. case MACSEC_CIPHER_ID_GCM_AES_256:
  2740. case MACSEC_DEFAULT_CIPHER_ID:
  2741. if (icv_len < MACSEC_MIN_ICV_LEN ||
  2742. icv_len > MACSEC_STD_ICV_LEN)
  2743. return -EINVAL;
  2744. break;
  2745. default:
  2746. return -EINVAL;
  2747. }
  2748. if (data[IFLA_MACSEC_ENCODING_SA]) {
  2749. if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
  2750. return -EINVAL;
  2751. }
  2752. for (flag = IFLA_MACSEC_ENCODING_SA + 1;
  2753. flag < IFLA_MACSEC_VALIDATION;
  2754. flag++) {
  2755. if (data[flag]) {
  2756. if (nla_get_u8(data[flag]) > 1)
  2757. return -EINVAL;
  2758. }
  2759. }
  2760. es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
  2761. sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
  2762. scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
  2763. if ((sci && (scb || es)) || (scb && es))
  2764. return -EINVAL;
  2765. if (data[IFLA_MACSEC_VALIDATION] &&
  2766. nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
  2767. return -EINVAL;
  2768. if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
  2769. nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
  2770. !data[IFLA_MACSEC_WINDOW])
  2771. return -EINVAL;
  2772. return 0;
  2773. }
  2774. static struct net *macsec_get_link_net(const struct net_device *dev)
  2775. {
  2776. return dev_net(macsec_priv(dev)->real_dev);
  2777. }
  2778. static size_t macsec_get_size(const struct net_device *dev)
  2779. {
  2780. return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
  2781. nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
  2782. nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
  2783. nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
  2784. nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
  2785. nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
  2786. nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
  2787. nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
  2788. nla_total_size(1) + /* IFLA_MACSEC_ES */
  2789. nla_total_size(1) + /* IFLA_MACSEC_SCB */
  2790. nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
  2791. nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
  2792. 0;
  2793. }
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
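
/* A device is a MACsec master if our rx_handler is attached to it. */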
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
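
/* React to events on the underlying real device: tear down every MACsec
 * device stacked on it when it unregisters, and keep their MTU within the
 * new limit when its MTU changes.
 */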
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
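
/* Module init: register the netdevice notifier, the rtnl link ops and the
 * genetlink family, unwinding in reverse order on failure.
 */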
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");