ixgbe_ipsec.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

#define IXGBE_IPSEC_KEY_BITS  160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";

static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
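	/* The loop above writes the four key words in reverse order and
	 * byteswapped; presumably the hardware expects the 128-bit key as
	 * one big-endian byte stream (the Rx key below is written the same
	 * way).
	 */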
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}
}

/**
 * ixgbe_ipsec_stop_data
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* If both Tx and Rx are ready there are no packets
	 * that we need to flush so the loopback configuration
	 * below is not necessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block.  Set the MAC loopback
	 * before block clear
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);
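	/* (With limit = 20 and 10ms per pass, the poll above allows the
	 *  data paths roughly 200ms to drain before we give up waiting.)
	 */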

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been bashed
 * by a chip reset.
 *
 * Any VF entries are removed from the SW and HW tables since either
 * (a) the VF also gets reset on PF reset and will ask again for the
 * offloads, or (b) the VF has been removed by a change in the num_vfs.
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];

		if (r->used) {
			if (r->mode & IXGBE_RXTXMOD_VF)
				ixgbe_ipsec_del_sa(r->xs);
			else
				ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
						      r->key, r->salt,
						      r->mode, r->iptbl_ind);
		}

		if (t->used) {
			if (t->mode & IXGBE_RXTXMOD_VF)
				ixgbe_ipsec_del_sa(t->xs);
			else
				ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
		}
	}

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (rsa->mode & IXGBE_RXTXMOD_VF)
			continue;
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
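	/* Illustrative layout for the 160-bit case handled below (an
	 * interpretation of the key_len checks, not taken from a datasheet):
	 *   key_data[0..15]  -> 128-bit AES-GCM key -> mykey
	 *   key_data[16..19] -> 32-bit salt         -> ((u32 *)key_data)[4]
	 */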
	if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

#define MANC_EN_IPV4_FILTER      BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT  16
#define MFVAL_IPV6_FILTER_SHIFT  24
#define MIPAF_ARR(_m, _n)        (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n)          (0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL           0x5060
#define BMCIP_V4                 0x2
#define BMCIP_V6                 0x3
#define BMCIP_MASK               0x3
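
	/* e.g. MIPAF_ARR(3, 0) works out to IXGBE_MIPAF + 0x30, the first
	 * word of the fourth filter row - the row that carries the four
	 * IPv4 filters checked below.
	 */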
	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == xs->id.daddr.a4)
					return 1;
			}
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == xs->id.daddr.a4)
				return 1;
		}

	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}
	}

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;	/* track the first empty seen */
			}
		}
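
		/* Defensive: with an empty table the search above exits on
		 * the first pass, so make sure we end up pointing at slot 0.
		 */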
		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		if (adapter->num_vfs)
			return -EOPNOTSUPP;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
 * @adapter: board private structure
 * @vf: VF id to be removed
 **/
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	int i;

	/* search rx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
		if (!ipsec->rx_tbl[i].used)
			continue;
		if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->rx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
	}

	/* search tx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
		if (!ipsec->tx_tbl[i].used)
			continue;
		if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->tx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
	}
}

/**
 * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
 * @adapter: board private structure
 * @msgbuf: The message buffer
 * @vf: the VF index
 *
 * Make up a new xs and algorithm info from the data sent by the VF.
 * We only need to sketch in just enough to set up the HW offload.
 * Put the resulting offload_handle into the return message to the VF.
 *
 * Returns 0 or error value
 **/
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_algo_desc *algo;
	struct sa_mbx_msg *sam;
	struct xfrm_state *xs;
	size_t aead_len;
	u16 sa_idx;
	u32 pfsa;
	int err;

	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	if (!adapter->vfinfo[vf].trusted ||
	    !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
		e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
		err = -EACCES;
		goto err_out;
	}

	/* Tx IPsec offload doesn't seem to work on this
	 * device, so block these requests for now.
	 */
	if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
		err = -EOPNOTSUPP;
		goto err_out;
	}

	xs = kzalloc(sizeof(*xs), GFP_KERNEL);
	if (unlikely(!xs)) {
		err = -ENOMEM;
		goto err_out;
	}

	xs->xso.flags = sam->flags;
	xs->id.spi = sam->spi;
	xs->id.proto = sam->proto;
	xs->props.family = sam->family;
	if (xs->props.family == AF_INET6)
		memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6));
	else
		memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
	xs->xso.dev = adapter->netdev;

	algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
	if (unlikely(!algo)) {
		err = -ENOENT;
		goto err_xs;
	}

	aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
	xs->aead = kzalloc(aead_len, GFP_KERNEL);
	if (unlikely(!xs->aead)) {
		err = -ENOMEM;
		goto err_xs;
	}

	xs->props.ealgo = algo->desc.sadb_alg_id;
	xs->geniv = algo->uinfo.aead.geniv;
	xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS;
	xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS;
	memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key));
	memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));

	/* set up the HW offload */
	err = ixgbe_ipsec_add_sa(xs);
	if (err)
		goto err_aead;

	pfsa = xs->xso.offload_handle;
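	/* The offload_handle encodes both the table and the index: Rx SAs
	 * are numbered from IXGBE_IPSEC_BASE_RX_INDEX and Tx SAs from
	 * IXGBE_IPSEC_BASE_TX_INDEX above them, which is what the range
	 * check below relies on.
	 */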
	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
		ipsec->rx_tbl[sa_idx].vf = vf;
		ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
	} else {
		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
		ipsec->tx_tbl[sa_idx].vf = vf;
		ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
	}

	msgbuf[1] = xs->xso.offload_handle;

	return 0;

err_aead:
	memset(xs->aead, 0, sizeof(*xs->aead));
	kfree(xs->aead);
err_xs:
	memset(xs, 0, sizeof(*xs));
	kfree(xs);
err_out:
	msgbuf[1] = err;
	return err;
}

/**
 * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
 * @adapter: board private structure
 * @msgbuf: The message buffer
 * @vf: the VF index
 *
 * Given the offload_handle sent by the VF, look for the related SA table
 * entry and use its xs field to call for a delete of the SA.
 *
 * Note: We silently ignore requests to delete entries that are already
 *       set to unused because when a VF is set to "DOWN", the PF first
 *       gets a reset and clears all the VF's entries; then the VF's
 *       XFRM stack sends individual deletes for each entry, which the
 *       reset already removed.  In the future it might be good to try to
 *       optimize this so not so many unnecessary delete messages are sent.
 *
 * Returns 0 or error value
 **/
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	u32 pfsa = msgbuf[1];
	u16 sa_idx;

	if (!adapter->vfinfo[vf].trusted) {
		e_err(drv, "vf %d attempted to delete an SA\n", vf);
		return -EPERM;
	}

	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
		struct rx_sa *rsa;

		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
			e_err(drv, "vf %d SA index %d out of range\n",
			      vf, sa_idx);
			return -EINVAL;
		}

		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used)
			return 0;

		if (!(rsa->mode & IXGBE_RXTXMOD_VF) ||
		    rsa->vf != vf) {
			e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx);
			return -ENOENT;
		}

		xs = ipsec->rx_tbl[sa_idx].xs;
	} else {
		struct tx_sa *tsa;

		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
			e_err(drv, "vf %d SA index %d out of range\n",
			      vf, sa_idx);
			return -EINVAL;
		}

		tsa = &ipsec->tx_tbl[sa_idx];

		if (!tsa->used)
			return 0;

		if (!(tsa->mode & IXGBE_RXTXMOD_VF) ||
		    tsa->vf != vf) {
			e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx);
			return -ENOENT;
		}

		xs = ipsec->tx_tbl[sa_idx].xs;
	}

	ixgbe_ipsec_del_sa(xs);

	/* remove the xs that was made-up in the add request */
	memset(xs, 0, sizeof(*xs));
	kfree(xs);

	return 0;
}

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
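		/* For instance, an ESP payload that needs two bytes of
		 * padding gives a trailer of 16 + 2 + 2 = 20 bytes here.
		 */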
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no vlan header in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * vlan device doesn't currently support xfrm offload.
	 */
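	/* c_hdr ends up pointing at the AH/ESP header that immediately
	 * follows the IP header: ihl * 4 bytes in for IPv4, or the fixed
	 * 40-byte header for IPv6 (no extension headers are expected in
	 * between).
	 */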
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * we should not be advertising support for IPsec.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}