cmsg.c

/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>	/* for udelay() used below */
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

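/* Control message tags are taken from a sliding window over the 16-bit
 * tag space: tag_alloc_next is the next tag to hand out, tag_alloc_last
 * is the oldest tag still outstanding, and the tag_allocator bitmap
 * tracks which tags in between are in use.  Capping the window at
 * NFP_BPF_TAG_ALLOC_SPAN (a quarter of the tag space) keeps a timed-out
 * tag from being handed out again too soon, while a late reply carrying
 * it could still arrive.
 */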
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
        u16 used_tags;

        used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

        return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
        /* All FW communication for BPF is request-reply.  To make sure we
         * don't reuse the message ID too early after timeout - limit the
         * number of requests in flight.
         */
        if (nfp_bpf_all_tags_busy(bpf)) {
                cmsg_warn(bpf, "all FW request contexts busy!\n");
                return -EAGAIN;
        }

        WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
        return bpf->tag_alloc_next++;
}

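/* Freeing a tag clears its bit; tag_alloc_last is then advanced past any
 * already-freed tags so the window keeps shrinking from its oldest end.
 * All callers in this file hold the control message lock.
 */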
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
        WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

        while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
               bpf->tag_alloc_last != bpf->tag_alloc_next)
                bpf->tag_alloc_last++;
}

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
        struct sk_buff *skb;

        skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
        if (!skb)
                return NULL;
        skb_put(skb, size);

        return skb;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
        unsigned int size;

        size = sizeof(struct cmsg_req_map_op);
        size += sizeof(struct cmsg_key_value_pair) * n;

        return nfp_bpf_cmsg_alloc(bpf, size);
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
        struct cmsg_hdr *hdr;

        hdr = (struct cmsg_hdr *)skb->data;

        return be16_to_cpu(hdr->tag);
}

static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
        unsigned int msg_tag;
        struct sk_buff *skb;

        skb_queue_walk(&bpf->cmsg_replies, skb) {
                msg_tag = nfp_bpf_cmsg_get_tag(skb);
                if (msg_tag == tag) {
                        nfp_bpf_free_tag(bpf, tag);
                        __skb_unlink(skb, &bpf->cmsg_replies);
                        return skb;
                }
        }

        return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
        struct sk_buff *skb;

        nfp_ctrl_lock(bpf->app->ctrl);
        skb = __nfp_bpf_reply(bpf, tag);
        nfp_ctrl_unlock(bpf->app->ctrl);

        return skb;
}

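/* Like nfp_bpf_reply() but, if no reply has arrived, the tag is freed
 * anyway.  Used after a timeout so a tag whose reply never shows up is
 * not leaked; the lookup and tag release happen atomically under the
 * control message lock.
 */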
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
        struct sk_buff *skb;

        nfp_ctrl_lock(bpf->app->ctrl);
        skb = __nfp_bpf_reply(bpf, tag);
        if (!skb)
                nfp_bpf_free_tag(bpf, tag);
        nfp_ctrl_unlock(bpf->app->ctrl);

        return skb;
}

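/* Wait for the reply to the request with @tag.  Replies usually arrive
 * quickly, so busy-poll briefly first; if that misses, sleep on the
 * wait queue with a 5 second timeout before giving up.
 */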
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
                        int tag)
{
        struct sk_buff *skb;
        int i, err;

        for (i = 0; i < 50; i++) {
                udelay(4);
                skb = nfp_bpf_reply(bpf, tag);
                if (skb)
                        return skb;
        }

        err = wait_event_interruptible_timeout(bpf->cmsg_wq,
                                               skb = nfp_bpf_reply(bpf, tag),
                                               msecs_to_jiffies(5000));
        /* We didn't get a response - try one last time and atomically
         * drop the tag even if no response is matched.
         */
        if (!skb)
                skb = nfp_bpf_reply_drop_tag(bpf, tag);
        if (err < 0) {
                /* wait_event_interruptible_timeout() returns the negative
                 * -ERESTARTSYS when interrupted, so compare against that.
                 */
                cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
                          err == -ERESTARTSYS ? "interrupted" : "error",
                          type, err);
                return ERR_PTR(err);
        }
        if (!skb) {
                cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
                          type);
                return ERR_PTR(-ETIMEDOUT);
        }

        return skb;
}

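/* Send a request and wait for the matching reply.  Consumes @skb in all
 * cases and returns the reply skb on success or an ERR_PTR().  A zero
 * @reply_size skips the length check and leaves validation to the
 * caller.
 */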
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
                         enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
        struct cmsg_hdr *hdr;
        int tag;

        nfp_ctrl_lock(bpf->app->ctrl);
        tag = nfp_bpf_alloc_tag(bpf);
        if (tag < 0) {
                nfp_ctrl_unlock(bpf->app->ctrl);
                dev_kfree_skb_any(skb);
                return ERR_PTR(tag);
        }

        hdr = (void *)skb->data;
        hdr->ver = CMSG_MAP_ABI_VERSION;
        hdr->type = type;
        hdr->tag = cpu_to_be16(tag);

        __nfp_app_ctrl_tx(bpf->app, skb);

        nfp_ctrl_unlock(bpf->app->ctrl);

        skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
        if (IS_ERR(skb))
                return skb;

        hdr = (struct cmsg_hdr *)skb->data;
        if (hdr->type != __CMSG_REPLY(type)) {
                cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
                          hdr->type, __CMSG_REPLY(type));
                goto err_free;
        }
        /* 0 reply_size means caller will do the validation */
        if (reply_size && skb->len != reply_size) {
                cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
                          type, skb->len, reply_size);
                goto err_free;
        }

        return skb;
err_free:
        dev_kfree_skb_any(skb);
        return ERR_PTR(-EIO);
}

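/* Translate the firmware's return code into a negative errno; a code
 * outside the table is treated as an I/O error.
 */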
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
                         struct cmsg_reply_map_simple *reply)
{
        static const int res_table[] = {
                [CMSG_RC_SUCCESS]       = 0,
                [CMSG_RC_ERR_MAP_FD]    = -EBADFD,
                [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
                [CMSG_RC_ERR_MAP_ERR]   = -EINVAL,
                [CMSG_RC_ERR_MAP_PARSE] = -EIO,
                [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
                [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
                [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
        };
        u32 rc;

        rc = be32_to_cpu(reply->rc);
        if (rc >= ARRAY_SIZE(res_table)) {
                cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
                return -EIO;
        }

        return res_table[rc];
}

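/* Ask the firmware to allocate a map table matching the attributes of
 * @map.  Returns the firmware-assigned table ID or a negative errno;
 * the wide return type lets the full u32 ID range stay distinguishable
 * from error codes.
 */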
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
        struct cmsg_reply_map_alloc_tbl *reply;
        struct cmsg_req_map_alloc_tbl *req;
        struct sk_buff *skb;
        u32 tid;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb)
                return -ENOMEM;

        req = (void *)skb->data;
        req->key_size = cpu_to_be32(map->key_size);
        req->value_size = cpu_to_be32(map->value_size);
        req->max_entries = cpu_to_be32(map->max_entries);
        req->map_type = cpu_to_be32(map->map_type);
        req->map_flags = 0;

        skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
                                       sizeof(*reply));
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                goto err_free;

        tid = be32_to_cpu(reply->tid);
        dev_consume_skb_any(skb);

        return tid;
err_free:
        dev_kfree_skb_any(skb);
        return err;
}

void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
        struct cmsg_reply_map_free_tbl *reply;
        struct cmsg_req_map_free_tbl *req;
        struct sk_buff *skb;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb) {
                cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
                return;
        }

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);

        skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
                                       sizeof(*reply));
        if (IS_ERR(skb)) {
                cmsg_warn(bpf, "leaking map - I/O error\n");
                return;
        }

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

        dev_consume_skb_any(skb);
}

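/* Common helper for all single-element map operations.  Builds a
 * one-element request, copies in whichever of @key / @value the
 * operation needs, and copies @out_key / @out_value back from the
 * reply.
 */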
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
                      enum nfp_bpf_cmsg_type op,
                      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;
        struct nfp_app_bpf *bpf = nfp_map->bpf;
        struct bpf_map *map = &offmap->map;
        struct cmsg_reply_map_op *reply;
        struct cmsg_req_map_op *req;
        struct sk_buff *skb;
        int err;

        /* FW messages have no space for more than 32 bits of flags */
        if (flags >> 32)
                return -EOPNOTSUPP;

        skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
        if (!skb)
                return -ENOMEM;

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);
        req->count = cpu_to_be32(1);
        req->flags = cpu_to_be32(flags);

        /* Copy inputs */
        if (key)
                memcpy(&req->elem[0].key, key, map->key_size);
        if (value)
                memcpy(&req->elem[0].value, value, map->value_size);

        skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
                                       sizeof(*reply) + sizeof(*reply->elem));
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                goto err_free;

        /* Copy outputs */
        if (out_key)
                memcpy(out_key, &reply->elem[0].key, map->key_size);
        if (out_value)
                memcpy(out_value, &reply->elem[0].value, map->value_size);

        dev_consume_skb_any(skb);

        return 0;
err_free:
        dev_kfree_skb_any(skb);
        return err;
}

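/* The exported wrappers below are thin mappings of the individual map
 * operations onto nfp_bpf_ctrl_entry_op(); GETFIRST is GETNEXT without
 * an input key.
 */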
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
                                     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
                                     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
                                     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
                                     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
                                     key, NULL, 0, next_key, NULL);
}

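/* Receive path for control messages.  After a length sanity check the
 * reply is matched against outstanding tags under the control message
 * lock; an unmatched reply is dropped, a matched one is queued on
 * cmsg_replies and any waiters are woken.
 */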
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_app_bpf *bpf = app->priv;
        unsigned int tag;

        if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
                goto err_free;
        }

        nfp_ctrl_lock(bpf->app->ctrl);

        tag = nfp_bpf_cmsg_get_tag(skb);
        if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
                cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
                          tag);
                goto err_unlock;
        }

        __skb_queue_tail(&bpf->cmsg_replies, skb);
        wake_up_interruptible_all(&bpf->cmsg_wq);

        nfp_ctrl_unlock(bpf->app->ctrl);

        return;
err_unlock:
        nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
        dev_kfree_skb_any(skb);
}