// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <[email protected]>
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "preload/bpf_preload.h"
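
/* Kind of object pinned at a bpffs inode. The type is not stored
 * explicitly; it is recovered from the inode's i_op pointer, see
 * bpf_inode_type() below.
 */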
enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};
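
/* Type-dispatched refcount helpers for the three pinnable objects.
 * Maps take/drop an additional user reference, matching
 * bpf_map_get_with_uref() in bpf_fd_probe_obj() below.
 */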
static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
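
/* Resolve a user-supplied fd to the bpf object behind it, trying map,
 * then program, then link. On success a reference is held and *type
 * tells the caller which put helper to use.
 */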
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}
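
/* The prog/map/link inode_operations are intentionally empty: they only
 * serve as distinct i_op addresses that bpf_inode_type() compares
 * against to recover the pinned object's type.
 */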
static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };

static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(&init_user_ns, inode, dir, mode);

	return inode;
}

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

static int bpf_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
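
/* seq_file machinery backing read() on a pinned map: the iterator keeps
 * the last key handed out by ->map_get_next_key() plus a "done" flag.
 */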
struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map.  The purpose is to
 * provide a simple user intuitive way to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};

static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}
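
/* Create the pinning inode: resolve the user path to a new dentry, make
 * sure the parent directory lives on bpffs, then hand object-specific
 * construction to vfs_mkobj().
 */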
static int bpf_obj_do_pin(const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}
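
/* Entry point for the BPF_OBJ_PIN command of bpf(2). From userspace this
 * is typically reached via libbpf, e.g. (illustrative path only):
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/my_map");
 *
 * On failure, the reference taken by bpf_fd_probe_obj() is dropped again.
 */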
int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

static void *bpf_obj_do_get(const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = path_permission(&path, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}
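
/* Entry point for the BPF_OBJ_GET command of bpf(2): look up a pinned
 * object by path and install a new fd for it. Note that links must be
 * opened O_RDWR; other access modes are rejected for them.
 */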
int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}

static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(&init_user_ns, inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}
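
/* In-kernel counterpart of BPF_OBJ_GET for programs: resolve a pinned
 * program by kernel path and take a reference. Exported so that modules
 * can resolve pinned programs by path.
 */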
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};

enum {
	OPT_MODE,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",	OPT_MODE),
	{}
};

struct bpf_mount_opts {
	umode_t mode;
};
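
/* "mode" is the only mount option; it sets the permission bits on the
 * bpffs root directory. For example (illustrative):
 *
 *	mount -t bpf -o mode=0700 bpf /sys/fs/bpf
 */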
static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0) {
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		if (opt == -ENOPARAM) {
			opt = vfs_parse_fs_param_source(fc, param);
			if (opt != -ENOPARAM)
				return opt;

			return 0;
		}

		if (opt < 0)
			return opt;
	}

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}

struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
{
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will populate it.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
			return false;
	}
	/* And grab the reference, so the module doesn't disappear while the
	 * kernel is interacting with the kernel module and its UMD.
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");
		return false;
	}
	return true;
}

static void bpf_preload_mod_put(void)
{
	if (bpf_preload_ops)
		/* now user can "rmmod bpf_preload" if necessary */
		module_put(bpf_preload_ops->owner);
}

static DEFINE_MUTEX(bpf_preload_lock);

static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	err = bpf_preload_ops->preload(objs);
	if (err)
		goto out_put;
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		bpf_link_inc(objs[i].link);
		err = bpf_iter_link_pin_kernel(parent,
					       objs[i].link_name, objs[i].link);
		if (err) {
			bpf_link_put(objs[i].link);
			goto out_put;
		}
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	return err;
}
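
/* Note the ordering below: the root directory's permission bits are
 * cleared before populate_bpffs() runs, so bpf_lookup()'s dot-in-name
 * check does not reject the preloaded iterator files; the mode (plus the
 * sticky bit) is only restored afterwards.
 */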
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}

static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}

static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};

static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);