offload.c

/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};
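
/* Per-netdev offload state: one entry for each netdev registered with a
 * bpf_offload_dev. Entries live in the offdevs hash table (keyed by the
 * netdev pointer) and on the owning device's ->netdevs list, and anchor
 * the lists of programs and maps currently bound to that netdev.
 */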
struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;
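
/* A netdev can host offloaded programs and maps only if its driver
 * implements the ndo_bpf callback.
 */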
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}
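
/* Find the offload state for @netdev; returns NULL if the device was never
 * registered for offload. Must be called with bpf_devs_lock held.
 */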
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}
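
/* Called at program load time for device-bound programs: resolve the target
 * ifindex in the caller's netns, check that the netdev is registered for
 * offload, and hang the bpf_prog_offload state off the program.
 */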
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
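
/* Offloaded programs must never run on the host CPU, so bpf_func is pointed
 * at a stub that only warns, and the device driver is asked to translate
 * the program instead.
 */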
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};
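
/* Send a map alloc/free command to the map's netdev via ndo_bpf. The caller
 * holds RTNL and guarantees offmap->netdev is still valid.
 */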
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
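
/* A program and a netdev match if the program is bound to that netdev, or
 * to another netdev that belongs to the same underlying offload device.
 */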
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
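
/* Driver API: register @netdev as an offload target belonging to @offdev so
 * that programs and maps can be bound to it.
 */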
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
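
/* Driver API: allocate an offload device handle wrapping the driver's
 * verifier and translation callbacks. The offdevs hash table is initialized
 * lazily the first time a device is created.
 */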
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
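
/* A minimal sketch of how a device driver might use the exported API above
 * (driver-side names below are hypothetical, not part of this file):
 *
 *	probe:
 *		offdev = bpf_offload_dev_create(&my_bpf_offload_ops, priv);
 *		err = bpf_offload_dev_netdev_register(offdev, netdev);
 *
 *	remove (unregister step runs under RTNL):
 *		bpf_offload_dev_netdev_unregister(offdev, netdev);
 *		bpf_offload_dev_destroy(offdev);
 *
 * Programs and maps created with prog_ifindex/map_ifindex pointing at a
 * registered netdev are then dispatched to the driver through its
 * bpf_prog_offload_ops callbacks and ndo_bpf.
 */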