ixgbe_ipsec.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

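	/* The key is written in reversed dword order and byteswapped to
	 * big-endian, the same layout the Rx key registers use below, so
	 * the value the device sees doesn't depend on host endianness.
	 */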
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register.
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

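	/* Keep only the global enable bit, then set the table selector,
	 * the entry index, and the WRITE strobe that commits the staged
	 * input registers into the chosen table entry.
	 */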
	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

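	/* Each Rx SA is committed in two stages: first the SPI and the
	 * IP-table index go out through the SPI table, then the key,
	 * salt, and mode bits go out through the key table at the same
	 * entry index.
	 */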
	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

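	/* The SA and IP tables survive a device reset, so every entry is
	 * scrubbed by hand with an all-zeros buffer.
	 */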
	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}

/**
 * ixgbe_ipsec_stop_data - stop the data paths while the engine is reconfigured
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
	IXGBE_WRITE_FLUSH(hw);

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block.  Set the MAC loopback
	 * before block clear.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);
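	/* the wait above gives the blocks up to 20 * 10ms = ~200ms to
	 * drain before we carry on regardless
	 */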

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine - disable the IPsec engine and SA lookup
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine - enable the IPsec engine and SA lookup
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

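	/* the SA counters let us reject a full table up front without
	 * scanning every entry
	 */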
	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

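	/* The hash is keyed on the SPI alone, so each entry in the bucket
	 * is further matched on destination address and protocol before
	 * it is accepted and a hold is taken on the state.
	 */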
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
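		/* dword 4 of the key blob, i.e. bytes 16-19, is the salt */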
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     rsa.xs->id.spi);
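		/* the hash key is the SPI, matching the lookup done in
		 * ixgbe_ipsec_find_rx_state()
		 */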
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto == IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
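		/* the offload handle encodes the table (via the base offset)
		 * and the slot; del_sa and the Tx fast path subtract the
		 * base back off to find this entry again
		 */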

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
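		/* (ihl == 5 means a bare 20-byte header with no options) */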
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
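			/* e.g. with the 16-byte ICV and 2 bytes of pad,
			 * trailer_len = 16 + 2 + 2 = 20 bytes
			 */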
		}
	}

	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no VLAN header in the way, because the
	 * hw won't recognize the IPsec packet and, anyway, the
	 * VLAN device doesn't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
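
	/* mark the crypto work as already done by the hardware so the
	 * stack skips software decryption
	 */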
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

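	/* the older 82598 isn't supported for IPsec offload */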
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBE_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
err1:
	kfree(ipsec);
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}