offload.c

/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

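/* A bpf_offload_dev groups the netdevs that belong to one offload device
 * (typically one per ASIC / driver instance). Each netdev registered
 * against it gets a bpf_offload_netdev entry, keyed by the netdev pointer
 * in the offdevs rhashtable and linked into the device's netdev list; it
 * also anchors the lists of programs and maps currently offloaded to that
 * netdev.
 */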
struct bpf_offload_dev {
        struct list_head netdevs;
};

struct bpf_offload_netdev {
        struct rhash_head l;
        struct net_device *netdev;
        struct bpf_offload_dev *offdev;
        struct list_head progs;
        struct list_head maps;
        struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
        .nelem_hint             = 4,
        .key_len                = sizeof(struct net_device *),
        .key_offset             = offsetof(struct bpf_offload_netdev, netdev),
        .head_offset            = offsetof(struct bpf_offload_netdev, l),
        .automatic_shrinking    = true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

static int bpf_dev_offload_check(struct net_device *netdev)
{
        if (!netdev)
                return -EINVAL;
        if (!netdev->netdev_ops->ndo_bpf)
                return -EOPNOTSUPP;
        return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
        lockdep_assert_held(&bpf_devs_lock);

        if (!offdevs_inited)
                return NULL;
        return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

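/* Called at BPF_PROG_LOAD time for device-bound programs: look up the
 * target netdev from attr->prog_ifindex in the caller's netns, check that
 * it implements ndo_bpf and has registered for offload, and link the new
 * bpf_prog_offload onto that netdev's program list under bpf_devs_lock.
 */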
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
        struct bpf_offload_netdev *ondev;
        struct bpf_prog_offload *offload;
        int err;

        if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_type != BPF_PROG_TYPE_XDP)
                return -EINVAL;

        if (attr->prog_flags)
                return -EINVAL;

        offload = kzalloc(sizeof(*offload), GFP_USER);
        if (!offload)
                return -ENOMEM;

        offload->prog = prog;

        offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
                                           attr->prog_ifindex);
        err = bpf_dev_offload_check(offload->netdev);
        if (err)
                goto err_maybe_put;

        down_write(&bpf_devs_lock);
        ondev = bpf_offload_find_netdev(offload->netdev);
        if (!ondev) {
                err = -EINVAL;
                goto err_unlock;
        }
        prog->aux->offload = offload;
        list_add_tail(&offload->offloads, &ondev->progs);
        dev_put(offload->netdev);
        up_write(&bpf_devs_lock);

        return 0;
err_unlock:
        up_write(&bpf_devs_lock);
err_maybe_put:
        if (offload->netdev)
                dev_put(offload->netdev);
        kfree(offload);
        return err;
}

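/* Helper for issuing ndo_bpf() commands against the program's bound netdev.
 * RTNL must be held; the offload may already have been torn down, in which
 * case -ENODEV is returned.
 */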
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
                             struct netdev_bpf *data)
{
        struct bpf_prog_offload *offload = prog->aux->offload;
        struct net_device *netdev;

        ASSERT_RTNL();

        if (!offload)
                return -ENODEV;
        netdev = offload->netdev;

        data->command = cmd;

        return netdev->netdev_ops->ndo_bpf(netdev, data);
}

int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
        struct netdev_bpf data = {};
        int err;

        data.verifier.prog = env->prog;

        rtnl_lock();
        err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
        if (err)
                goto exit_unlock;

        env->prog->aux->offload->dev_ops = data.verifier.ops;
        env->prog->aux->offload->dev_state = true;
exit_unlock:
        rtnl_unlock();
        return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
                                 int insn_idx, int prev_insn_idx)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload)
                ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = env->prog->aux->offload;
        if (offload) {
                if (offload->dev_ops->finalize)
                        ret = offload->dev_ops->finalize(env);
                else
                        ret = 0;
        }
        up_read(&bpf_devs_lock);

        return ret;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
        struct bpf_prog_offload *offload = prog->aux->offload;
        struct netdev_bpf data = {};

        data.offload.prog = prog;

        if (offload->dev_state)
                WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

        /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
        bpf_prog_free_id(prog, true);

        list_del_init(&offload->offloads);
        kfree(offload);
        prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (prog->aux->offload)
                __bpf_prog_offload_destroy(prog);
        up_write(&bpf_devs_lock);
        rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
        struct netdev_bpf data = {};
        int ret;

        data.offload.prog = prog;

        rtnl_lock();
        ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
        rtnl_unlock();

        return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
                                          const struct bpf_insn *insn)
{
        WARN(1, "attempt to execute device eBPF program on the host!");
        return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
        prog->bpf_func = bpf_prog_warn_on_exec;

        return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
        struct bpf_prog *prog;
        struct bpf_prog_info *info;
};

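/* ns_get_path_cb() callback: under RTNL and bpf_devs_lock, report the
 * ifindex of the bound netdev and take a reference on its netns so the
 * caller can encode netns_dev/netns_ino into bpf_prog_info.
 */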
static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
        struct ns_get_path_bpf_prog_args *args = private_data;
        struct bpf_prog_aux *aux = args->prog->aux;
        struct ns_common *ns;
        struct net *net;

        rtnl_lock();
        down_read(&bpf_devs_lock);

        if (aux->offload) {
                args->info->ifindex = aux->offload->netdev->ifindex;
                net = dev_net(aux->offload->netdev);
                get_net(net);
                ns = &net->ns;
        } else {
                args->info->ifindex = 0;
                ns = NULL;
        }

        up_read(&bpf_devs_lock);
        rtnl_unlock();

        return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                               struct bpf_prog *prog)
{
        struct ns_get_path_bpf_prog_args args = {
                .prog   = prog,
                .info   = info,
        };
        struct bpf_prog_aux *aux = prog->aux;
        struct inode *ns_inode;
        struct path ns_path;
        char __user *uinsns;
        void *res;
        u32 ulen;

        res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
        if (IS_ERR(res)) {
                if (!info->ifindex)
                        return -ENODEV;
                return PTR_ERR(res);
        }

        down_read(&bpf_devs_lock);

        if (!aux->offload) {
                up_read(&bpf_devs_lock);
                return -ENODEV;
        }

        ulen = info->jited_prog_len;
        info->jited_prog_len = aux->offload->jited_len;
        if (info->jited_prog_len && ulen) {
                uinsns = u64_to_user_ptr(info->jited_prog_insns);
                ulen = min_t(u32, info->jited_prog_len, ulen);
                if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
                        up_read(&bpf_devs_lock);
                        return -EFAULT;
                }
        }

        up_read(&bpf_devs_lock);

        ns_inode = ns_path.dentry->d_inode;
        info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
        info->netns_ino = ns_inode->i_ino;
        path_put(&ns_path);

        return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
                               enum bpf_netdev_command cmd)
{
        struct netdev_bpf data = {};
        struct net_device *netdev;

        ASSERT_RTNL();

        data.command = cmd;
        data.offmap = offmap;
        /* Caller must make sure netdev is valid */
        netdev = offmap->netdev;

        return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

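/* Called at BPF_MAP_CREATE time when map_ifindex is set: only array and
 * hash maps may be device-bound, and the target netdev must both support
 * ndo_bpf and have registered for offload, otherwise allocation fails.
 */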
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
        struct net *net = current->nsproxy->net_ns;
        struct bpf_offload_netdev *ondev;
        struct bpf_offloaded_map *offmap;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
        if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
            attr->map_type != BPF_MAP_TYPE_HASH)
                return ERR_PTR(-EINVAL);

        offmap = kzalloc(sizeof(*offmap), GFP_USER);
        if (!offmap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&offmap->map, attr);

        rtnl_lock();
        down_write(&bpf_devs_lock);
        offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
        err = bpf_dev_offload_check(offmap->netdev);
        if (err)
                goto err_unlock;

        ondev = bpf_offload_find_netdev(offmap->netdev);
        if (!ondev) {
                err = -EINVAL;
                goto err_unlock;
        }

        err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
        if (err)
                goto err_unlock;

        list_add_tail(&offmap->offloads, &ondev->maps);
        up_write(&bpf_devs_lock);
        rtnl_unlock();

        return &offmap->map;

err_unlock:
        up_write(&bpf_devs_lock);
        rtnl_unlock();
        kfree(offmap);
        return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
        WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
        /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
        bpf_map_free_id(&offmap->map, true);
        list_del_init(&offmap->offloads);
        offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);

        rtnl_lock();
        down_write(&bpf_devs_lock);
        if (offmap->netdev)
                __bpf_map_offload_destroy(offmap);
        up_write(&bpf_devs_lock);
        rtnl_unlock();

        kfree(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
                                void *key, void *value, u64 flags)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        if (unlikely(flags > BPF_EXIST))
                return -EINVAL;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_update_elem(offmap, key, value,
                                                       flags);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_delete_elem(offmap, key);
        up_read(&bpf_devs_lock);

        return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_offloaded_map *offmap = map_to_offmap(map);
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        if (offmap->netdev)
                ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
        up_read(&bpf_devs_lock);

        return ret;
}

struct ns_get_path_bpf_map_args {
        struct bpf_offloaded_map *offmap;
        struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
        struct ns_get_path_bpf_map_args *args = private_data;
        struct ns_common *ns;
        struct net *net;

        rtnl_lock();
        down_read(&bpf_devs_lock);

        if (args->offmap->netdev) {
                args->info->ifindex = args->offmap->netdev->ifindex;
                net = dev_net(args->offmap->netdev);
                get_net(net);
                ns = &net->ns;
        } else {
                args->info->ifindex = 0;
                ns = NULL;
        }

        up_read(&bpf_devs_lock);
        rtnl_unlock();

        return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
        struct ns_get_path_bpf_map_args args = {
                .offmap = map_to_offmap(map),
                .info   = info,
        };
        struct inode *ns_inode;
        struct path ns_path;
        void *res;

        res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
        if (IS_ERR(res)) {
                if (!info->ifindex)
                        return -ENODEV;
                return PTR_ERR(res);
        }

        ns_inode = ns_path.dentry->d_inode;
        info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
        info->netns_ino = ns_inode->i_ino;
        path_put(&ns_path);

        return 0;
}

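/* A program "matches" a netdev if it is bound to that very netdev, or to
 * another netdev registered under the same bpf_offload_dev (i.e. another
 * port of the same device). Caller must hold bpf_devs_lock.
 */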
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
                                    struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev1, *ondev2;
        struct bpf_prog_offload *offload;

        if (!bpf_prog_is_dev_bound(prog->aux))
                return false;

        offload = prog->aux->offload;
        if (!offload)
                return false;
        if (offload->netdev == netdev)
                return true;

        ondev1 = bpf_offload_find_netdev(offload->netdev);
        ondev2 = bpf_offload_find_netdev(netdev);

        return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
        bool ret;

        down_read(&bpf_devs_lock);
        ret = __bpf_offload_dev_match(prog, netdev);
        up_read(&bpf_devs_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap;
        bool ret;

        if (!bpf_map_is_dev_bound(map))
                return bpf_map_offload_neutral(map);
        offmap = map_to_offmap(map);

        down_read(&bpf_devs_lock);
        ret = __bpf_offload_dev_match(prog, offmap->netdev);
        up_read(&bpf_devs_lock);

        return ret;
}

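/* Driver-facing registration API. A plausible driver flow (a sketch only,
 * details vary by driver): allocate one bpf_offload_dev per device with
 * bpf_offload_dev_create(), call bpf_offload_dev_netdev_register() for
 * each port's netdev, and tear everything down in the reverse order on
 * remove.
 */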
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                                    struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev;
        int err;

        ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
        if (!ondev)
                return -ENOMEM;

        ondev->netdev = netdev;
        ondev->offdev = offdev;
        INIT_LIST_HEAD(&ondev->progs);
        INIT_LIST_HEAD(&ondev->maps);

        down_write(&bpf_devs_lock);
        err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
        if (err) {
                netdev_warn(netdev, "failed to register for BPF offload\n");
                goto err_unlock_free;
        }

        list_add(&ondev->offdev_netdevs, &offdev->netdevs);
        up_write(&bpf_devs_lock);
        return 0;

err_unlock_free:
        up_write(&bpf_devs_lock);
        kfree(ondev);
        return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

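/* When a netdev goes away, try to migrate its offloaded programs and maps
 * to another netdev of the same offload device; if none is left, destroy
 * them. Must be called under RTNL.
 */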
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                       struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev, *altdev;
        struct bpf_offloaded_map *offmap, *mtmp;
        struct bpf_prog_offload *offload, *ptmp;

        ASSERT_RTNL();

        down_write(&bpf_devs_lock);
        ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
        if (WARN_ON(!ondev))
                goto unlock;

        WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
        list_del(&ondev->offdev_netdevs);

        /* Try to move the objects to another netdev of the device */
        altdev = list_first_entry_or_null(&offdev->netdevs,
                                          struct bpf_offload_netdev,
                                          offdev_netdevs);
        if (altdev) {
                list_for_each_entry(offload, &ondev->progs, offloads)
                        offload->netdev = altdev->netdev;
                list_splice_init(&ondev->progs, &altdev->progs);

                list_for_each_entry(offmap, &ondev->maps, offloads)
                        offmap->netdev = altdev->netdev;
                list_splice_init(&ondev->maps, &altdev->maps);
        } else {
                list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
                        __bpf_prog_offload_destroy(offload->prog);
                list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
                        __bpf_map_offload_destroy(offmap);
        }

        WARN_ON(!list_empty(&ondev->progs));
        WARN_ON(!list_empty(&ondev->maps));
        kfree(ondev);
unlock:
        up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

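/* The first caller initializes the global offdevs rhashtable; after that
 * each call just allocates a bpf_offload_dev with an empty netdev list.
 */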
struct bpf_offload_dev *bpf_offload_dev_create(void)
{
        struct bpf_offload_dev *offdev;
        int err;

        down_write(&bpf_devs_lock);
        if (!offdevs_inited) {
                err = rhashtable_init(&offdevs, &offdevs_params);
                if (err) {
                        up_write(&bpf_devs_lock);
                        return ERR_PTR(err);
                }
                offdevs_inited = true;
        }
        up_write(&bpf_devs_lock);

        offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
        if (!offdev)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&offdev->netdevs);

        return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
        WARN_ON(!list_empty(&offdev->netdevs));
        kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);