/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

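/* Worked example (illustrative, not part of the original driver):
 * because used_tags is computed in u16 arithmetic, the in-flight window
 * survives the 16-bit rollover.  E.g. tag_alloc_last == 0xfffe and
 * tag_alloc_next == 0x0003 gives used_tags == 5, well under
 * NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4 == 16383).
 */
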
static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

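/* Worked example (illustrative, not part of the original driver):
 * tags may be freed out of order, but tag_alloc_last only advances past
 * a contiguous run of freed tags.  With tags 5, 6 and 7 allocated,
 * freeing 6 first leaves tag_alloc_last at 5; freeing 5 afterwards
 * advances it straight to 7, skipping the already-released 6.
 */
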
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += sizeof(struct cmsg_key_value_pair) * n;

	return nfp_bpf_cmsg_alloc(bpf, size);
}

static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

/* Caller must hold the ctrl message lock (nfp_ctrl_lock()), which
 * protects both the cmsg_replies queue and the tag allocator.
 */
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try one last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

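/* Timing sketch (illustrative): the wait above is two-phase.  A short
 * busy-poll - 50 iterations of udelay(4), i.e. roughly 200 us - catches
 * replies the FW turns around quickly, without paying for a sleep/wake
 * round trip.  Only then does the thread sleep on cmsg_wq, bounded by
 * 5 seconds (msecs_to_jiffies(5000)) before the tag is reclaimed.
 */
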
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			  type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

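/* Flow sketch (illustrative, not part of the original driver): a
 * caller-built request travels through nfp_bpf_cmsg_communicate()
 * roughly like this:
 *
 *	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
 *	// ... caller fills the request body ...
 *	// communicate(): stamp hdr->{ver,type,tag}, TX under the ctrl
 *	// lock, then block in nfp_bpf_cmsg_wait_reply() until
 *	// nfp_bpf_ctrl_msg_rx() queues a reply with a matching tag and
 *	// wakes cmsg_wq.
 *	skb = nfp_bpf_cmsg_communicate(bpf, skb, type, sizeof(*reply));
 *
 * On success the returned skb holds the reply; on any error the request
 * skb has already been consumed and an ERR_PTR() is returned.
 */
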
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

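/* Example (illustrative): a lookup of a missing key comes back from the
 * FW as CMSG_RC_ERR_MAP_NOENT and is translated here to -ENOENT - the
 * same errno the bpf(2) map syscalls report to user space; likewise a
 * BPF_NOEXIST update of an existing key maps to -EEXIST.
 */
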
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

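/* Note (illustrative, hypothetical caller): the long long return type
 * lets one value carry either the full 32-bit FW table ID (tid) on
 * success or a negative errno on failure, e.g.:
 *
 *	long long tid = nfp_bpf_ctrl_alloc_map(bpf, map);
 *
 *	if (tid < 0)
 *		return tid;		// -ENOMEM, -EIO, ...
 *	nfp_map->tid = tid;		// stash the ID for later ops
 */
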
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(&req->elem[0].key, key, map->key_size);
	if (value)
		memcpy(&req->elem[0].value, value, map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       sizeof(*reply) + sizeof(*reply->elem));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, &reply->elem[0].key, map->key_size);
	if (out_value)
		memcpy(out_value, &reply->elem[0].value, map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

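/* Usage sketch (illustrative, not part of the original driver): walking
 * every key of an offloaded map chains the two calls above; "key" and
 * "next_key" are caller-provided buffers of offmap->map.key_size bytes.
 *
 *	err = nfp_bpf_ctrl_getfirst_entry(offmap, key);
 *	while (!err) {
 *		// ... use key ...
 *		err = nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
 *		memcpy(key, next_key, offmap->map.key_size);
 *	}
 *	// -ENOENT (CMSG_RC_ERR_MAP_NOENT from the FW) ends the walk
 */
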
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		nfp_bpf_event_output(bpf, skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}