ixgbe_ipsec.c

/*******************************************************************************
 *
 * Intel 10 Gigabit PCI Express Linux driver
 * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
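
/* A sketch of the indirect access pattern used by the helpers above
 * and below (the device datasheet remains the authoritative
 * description of the sequencing):
 *
 *   1. write the payload into the IPSTX* / IPSRX* staging registers
 *   2. flush the posted writes
 *   3. read-modify-write the index register, keeping only the IPS_EN
 *      bit and setting the table/index fields plus the WRITE bit
 *
 * The hardware then latches the staged data into the selected table
 * row on its own; these helpers do not poll for completion.
 */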

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}

/**
 * ixgbe_ipsec_stop_data
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
	IXGBE_WRITE_FLUSH(hw);

	/* If the Tx FIFO has no link but still holds data, we can't
	 * drain the Tx sec block, so set MAC loopback before clearing
	 * the block.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
		IXGBE_WRITE_FLUSH(hw);
	}
}
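
/* Note on the wait loop above: with a 10 ms delay per iteration and a
 * limit of 20 iterations, the drain wait is bounded at roughly 200 ms
 * before the code gives up and proceeds anyway.
 */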

/**
 * ixgbe_ipsec_stop_engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}
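
/* A note on STORE_FORWARD above: enabling IPsec Tx offload puts the
 * Tx security block into full-packet store-and-forward, presumably so
 * the engine can see the entire frame before transmission starts
 * (e.g. to compute and insert the ICV). This is also a likely
 * contributor to the extra power draw called out in the kernel-doc.
 */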

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);
		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}
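
/* The reload order above matters: each Rx SA carries an iptbl_ind
 * reference into the IP address table, so the IP entries are restored
 * before the SA entries that point at them.
 */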

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
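
/* The linear scans above are fine for this use: SA adds happen on the
 * control path, not per packet, and the num_rx_sa/num_tx_sa counters
 * let a full table bail out with -ENOSPC before any scan is done.
 */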

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();

	return ret;
}
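
/* This lookup runs in the Rx cleanup path via ixgbe_ipsec_rx(), which
 * is why it uses an RCU hash walk keyed on the SPI rather than a lock.
 * The xfrm_state_hold() takes a reference on behalf of the secpath the
 * caller attaches the state to; the xfrm input layer drops it later.
 */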

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables. The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
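
/* For reference, a sketch of a userspace configuration this parser
 * accepts; the addresses, SPI, key bytes, and device name are
 * illustrative placeholders, not values from this driver:
 *
 *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 \
 *      proto esp spi 0x00000100 mode transport \
 *      aead 'rfc4106(gcm(aes))' \
 *      0x3132333435363738393031323334353637383930 128 \
 *      offload dev eth0 dir in
 *
 * The hex string is 20 bytes - a 16-byte AES key followed by the
 * 4-byte salt - which is the alg_key_len == 160 case above, and the
 * trailing 128 is the ICV length checked against IXGBE_IPSEC_AUTH_BITS.
 */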

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table. If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr. If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;	/* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto == IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
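
/* A note on the handle encoding above: xs->xso.offload_handle stores
 * the SA table index biased by a per-direction base value, so the Tx
 * hot path and ixgbe_ipsec_del_sa() can recover the table index by
 * subtracting IXGBE_IPSEC_BASE_TX_INDEX or IXGBE_IPSEC_BASE_RX_INDEX
 * as appropriate.
 */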

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
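
/* These are the driver's entry points into the kernel xfrm offload
 * API: the stack calls xdo_dev_state_add/delete when userspace
 * installs or removes an SA marked for offload to this netdev, and
 * calls xdo_dev_offload_ok per packet to ask whether the hardware can
 * handle it; returning false there makes the stack fall back to
 * software crypto for that packet.
 */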

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding. This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
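
/* Worked example for the trailer math above, using illustrative
 * payload sizes: ESP pads the payload so that payload + padlen + 2 is
 * a multiple of four, so a 14-byte payload gets padlen = 0 and, with
 * the 16-byte ICV, a trailer of 16 + 2 + 0 = 18 bytes, while an
 * 11-byte payload gets padlen = 3 and a trailer of 16 + 2 + 3 = 21
 * bytes, matching the static xs->props.trailer_len value (21) noted
 * in the comment.
 */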

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header is in the way: the hardware
	 * wouldn't recognize the IPsec packet then anyway, and the
	 * VLAN device doesn't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
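
/* Attaching the state to the secpath with CRYPTO_DONE/CRYPTO_SUCCESS
 * is what tells the xfrm input layer that the hardware has already
 * decrypted and authenticated this packet, so the software crypto
 * step is skipped and only the protocol bookkeeping runs.
 */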

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBE_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}
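
/* The unwind at err2 above is safe even when only some of the tables
 * were allocated: ipsec comes from kzalloc(), so any table pointer not
 * yet assigned is NULL, and kfree(NULL) is a no-op.
 */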

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}