/* fs/cachefiles/daemon.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"
  22. static int cachefiles_daemon_open(struct inode *, struct file *);
  23. static int cachefiles_daemon_release(struct inode *, struct file *);
  24. static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
  25. loff_t *);
  26. static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
  27. size_t, loff_t *);
  28. static __poll_t cachefiles_daemon_poll(struct file *,
  29. struct poll_table_struct *);
  30. static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
  31. static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
  32. static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
  33. static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
  34. static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
  35. static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
  36. static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
  37. static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
  38. static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
  39. static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
  40. static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
  41. static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
  42. static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
  43. static void cachefiles_daemon_unbind(struct cachefiles_cache *);
  44. static unsigned long cachefiles_open;
  45. const struct file_operations cachefiles_daemon_fops = {
  46. .owner = THIS_MODULE,
  47. .open = cachefiles_daemon_open,
  48. .release = cachefiles_daemon_release,
  49. .read = cachefiles_daemon_read,
  50. .write = cachefiles_daemon_write,
  51. .poll = cachefiles_daemon_poll,
  52. .llseek = noop_llseek,
  53. };
  54. struct cachefiles_daemon_cmd {
  55. char name[8];
  56. int (*handler)(struct cachefiles_cache *cache, char *args);
  57. };
  58. static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
  59. { "bind", cachefiles_daemon_bind },
  60. { "brun", cachefiles_daemon_brun },
  61. { "bcull", cachefiles_daemon_bcull },
  62. { "bstop", cachefiles_daemon_bstop },
  63. { "cull", cachefiles_daemon_cull },
  64. { "debug", cachefiles_daemon_debug },
  65. { "dir", cachefiles_daemon_dir },
  66. { "frun", cachefiles_daemon_frun },
  67. { "fcull", cachefiles_daemon_fcull },
  68. { "fstop", cachefiles_daemon_fstop },
  69. { "inuse", cachefiles_daemon_inuse },
  70. { "secctx", cachefiles_daemon_secctx },
  71. { "tag", cachefiles_daemon_tag },
  72. #ifdef CONFIG_CACHEFILES_ONDEMAND
  73. { "copen", cachefiles_ondemand_copen },
  74. #endif
  75. { "", NULL }
  76. };
  77. /*
  78. * Prepare a cache for caching.
  79. */
  80. static int cachefiles_daemon_open(struct inode *inode, struct file *file)
  81. {
  82. struct cachefiles_cache *cache;
  83. _enter("");
  84. /* only the superuser may do this */
  85. if (!capable(CAP_SYS_ADMIN))
  86. return -EPERM;
  87. /* the cachefiles device may only be open once at a time */
  88. if (xchg(&cachefiles_open, 1) == 1)
  89. return -EBUSY;
  90. /* allocate a cache record */
  91. cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
  92. if (!cache) {
  93. cachefiles_open = 0;
  94. return -ENOMEM;
  95. }
  96. mutex_init(&cache->daemon_mutex);
  97. init_waitqueue_head(&cache->daemon_pollwq);
  98. INIT_LIST_HEAD(&cache->volumes);
  99. INIT_LIST_HEAD(&cache->object_list);
  100. spin_lock_init(&cache->object_list_lock);
  101. refcount_set(&cache->unbind_pincount, 1);
  102. xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
  103. xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);
  104. /* set default caching limits
  105. * - limit at 1% free space and/or free files
  106. * - cull below 5% free space and/or free files
  107. * - cease culling above 7% free space and/or free files
  108. */
  109. cache->frun_percent = 7;
  110. cache->fcull_percent = 5;
  111. cache->fstop_percent = 1;
  112. cache->brun_percent = 7;
  113. cache->bcull_percent = 5;
  114. cache->bstop_percent = 1;
  115. file->private_data = cache;
  116. cache->cachefilesd = file;
  117. return 0;
  118. }
  119. static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
  120. {
  121. struct xarray *xa = &cache->reqs;
  122. struct cachefiles_req *req;
  123. unsigned long index;
  124. /*
  125. * Make sure the following two operations won't be reordered.
  126. * 1) set CACHEFILES_DEAD bit
  127. * 2) flush requests in the xarray
  128. * Otherwise the request may be enqueued after xarray has been
  129. * flushed, leaving the orphan request never being completed.
  130. *
  131. * CPU 1 CPU 2
  132. * ===== =====
  133. * flush requests in the xarray
  134. * test CACHEFILES_DEAD bit
  135. * enqueue the request
  136. * set CACHEFILES_DEAD bit
  137. */
  138. smp_mb();
  139. xa_lock(xa);
  140. xa_for_each(xa, index, req) {
  141. req->error = -EIO;
  142. complete(&req->done);
  143. }
  144. xa_unlock(xa);
  145. xa_destroy(&cache->reqs);
  146. xa_destroy(&cache->ondemand_ids);
  147. }
  148. void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
  149. {
  150. if (refcount_dec_and_test(&cache->unbind_pincount)) {
  151. cachefiles_daemon_unbind(cache);
  152. cachefiles_open = 0;
  153. kfree(cache);
  154. }
  155. }
  156. void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
  157. {
  158. refcount_inc(&cache->unbind_pincount);
  159. }
  160. /*
  161. * Release a cache.
  162. */
  163. static int cachefiles_daemon_release(struct inode *inode, struct file *file)
  164. {
  165. struct cachefiles_cache *cache = file->private_data;
  166. _enter("");
  167. ASSERT(cache);
  168. set_bit(CACHEFILES_DEAD, &cache->flags);
  169. if (cachefiles_in_ondemand_mode(cache))
  170. cachefiles_flush_reqs(cache);
  171. /* clean up the control file interface */
  172. cache->cachefilesd = NULL;
  173. file->private_data = NULL;
  174. cachefiles_put_unbind_pincount(cache);
  175. _leave("");
  176. return 0;
  177. }
  178. static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
  179. char __user *_buffer, size_t buflen)
  180. {
  181. unsigned long long b_released;
  182. unsigned f_released;
  183. char buffer[256];
  184. int n;
  185. /* check how much space the cache has */
  186. cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
  187. /* summarise */
  188. f_released = atomic_xchg(&cache->f_released, 0);
  189. b_released = atomic_long_xchg(&cache->b_released, 0);
  190. clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
  191. n = snprintf(buffer, sizeof(buffer),
  192. "cull=%c"
  193. " frun=%llx"
  194. " fcull=%llx"
  195. " fstop=%llx"
  196. " brun=%llx"
  197. " bcull=%llx"
  198. " bstop=%llx"
  199. " freleased=%x"
  200. " breleased=%llx",
  201. test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
  202. (unsigned long long) cache->frun,
  203. (unsigned long long) cache->fcull,
  204. (unsigned long long) cache->fstop,
  205. (unsigned long long) cache->brun,
  206. (unsigned long long) cache->bcull,
  207. (unsigned long long) cache->bstop,
  208. f_released,
  209. b_released);
  210. if (n > buflen)
  211. return -EMSGSIZE;
  212. if (copy_to_user(_buffer, buffer, n) != 0)
  213. return -EFAULT;
  214. return n;
  215. }
  216. /*
  217. * Read the cache state.
  218. */
  219. static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
  220. size_t buflen, loff_t *pos)
  221. {
  222. struct cachefiles_cache *cache = file->private_data;
  223. //_enter(",,%zu,", buflen);
  224. if (!test_bit(CACHEFILES_READY, &cache->flags))
  225. return 0;
  226. if (cachefiles_in_ondemand_mode(cache))
  227. return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
  228. else
  229. return cachefiles_do_daemon_read(cache, _buffer, buflen);
  230. }
  231. /*
  232. * Take a command from cachefilesd, parse it and act on it.
  233. */
  234. static ssize_t cachefiles_daemon_write(struct file *file,
  235. const char __user *_data,
  236. size_t datalen,
  237. loff_t *pos)
  238. {
  239. const struct cachefiles_daemon_cmd *cmd;
  240. struct cachefiles_cache *cache = file->private_data;
  241. ssize_t ret;
  242. char *data, *args, *cp;
  243. //_enter(",,%zu,", datalen);
  244. ASSERT(cache);
  245. if (test_bit(CACHEFILES_DEAD, &cache->flags))
  246. return -EIO;
  247. if (datalen > PAGE_SIZE - 1)
  248. return -EOPNOTSUPP;
  249. /* drag the command string into the kernel so we can parse it */
  250. data = memdup_user_nul(_data, datalen);
  251. if (IS_ERR(data))
  252. return PTR_ERR(data);
  253. ret = -EINVAL;
  254. if (memchr(data, '\0', datalen))
  255. goto error;
  256. /* strip any newline */
  257. cp = memchr(data, '\n', datalen);
  258. if (cp) {
  259. if (cp == data)
  260. goto error;
  261. *cp = '\0';
  262. }
  263. /* parse the command */
  264. ret = -EOPNOTSUPP;
  265. for (args = data; *args; args++)
  266. if (isspace(*args))
  267. break;
  268. if (*args) {
  269. if (args == data)
  270. goto error;
  271. *args = '\0';
  272. args = skip_spaces(++args);
  273. }
  274. /* run the appropriate command handler */
  275. for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
  276. if (strcmp(cmd->name, data) == 0)
  277. goto found_command;
  278. error:
  279. kfree(data);
  280. //_leave(" = %zd", ret);
  281. return ret;
  282. found_command:
  283. mutex_lock(&cache->daemon_mutex);
  284. ret = -EIO;
  285. if (!test_bit(CACHEFILES_DEAD, &cache->flags))
  286. ret = cmd->handler(cache, args);
  287. mutex_unlock(&cache->daemon_mutex);
  288. if (ret == 0)
  289. ret = datalen;
  290. goto error;
  291. }
  292. /*
  293. * Poll for culling state
  294. * - use EPOLLOUT to indicate culling state
  295. */
  296. static __poll_t cachefiles_daemon_poll(struct file *file,
  297. struct poll_table_struct *poll)
  298. {
  299. struct cachefiles_cache *cache = file->private_data;
  300. __poll_t mask;
  301. poll_wait(file, &cache->daemon_pollwq, poll);
  302. mask = 0;
  303. if (cachefiles_in_ondemand_mode(cache)) {
  304. if (!xa_empty(&cache->reqs))
  305. mask |= EPOLLIN;
  306. } else {
  307. if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
  308. mask |= EPOLLIN;
  309. }
  310. if (test_bit(CACHEFILES_CULLING, &cache->flags))
  311. mask |= EPOLLOUT;
  312. return mask;
  313. }
  314. /*
  315. * Give a range error for cache space constraints
  316. * - can be tail-called
  317. */
  318. static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
  319. char *args)
  320. {
  321. pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
  322. return -EINVAL;
  323. }
  324. /*
  325. * Set the percentage of files at which to stop culling
  326. * - command: "frun <N>%"
  327. */
  328. static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
  329. {
  330. unsigned long frun;
  331. _enter(",%s", args);
  332. if (!*args)
  333. return -EINVAL;
  334. frun = simple_strtoul(args, &args, 10);
  335. if (args[0] != '%' || args[1] != '\0')
  336. return -EINVAL;
  337. if (frun <= cache->fcull_percent || frun >= 100)
  338. return cachefiles_daemon_range_error(cache, args);
  339. cache->frun_percent = frun;
  340. return 0;
  341. }
  342. /*
  343. * Set the percentage of files at which to start culling
  344. * - command: "fcull <N>%"
  345. */
  346. static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
  347. {
  348. unsigned long fcull;
  349. _enter(",%s", args);
  350. if (!*args)
  351. return -EINVAL;
  352. fcull = simple_strtoul(args, &args, 10);
  353. if (args[0] != '%' || args[1] != '\0')
  354. return -EINVAL;
  355. if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
  356. return cachefiles_daemon_range_error(cache, args);
  357. cache->fcull_percent = fcull;
  358. return 0;
  359. }
  360. /*
  361. * Set the percentage of files at which to stop allocating
  362. * - command: "fstop <N>%"
  363. */
  364. static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
  365. {
  366. unsigned long fstop;
  367. _enter(",%s", args);
  368. if (!*args)
  369. return -EINVAL;
  370. fstop = simple_strtoul(args, &args, 10);
  371. if (args[0] != '%' || args[1] != '\0')
  372. return -EINVAL;
  373. if (fstop >= cache->fcull_percent)
  374. return cachefiles_daemon_range_error(cache, args);
  375. cache->fstop_percent = fstop;
  376. return 0;
  377. }
  378. /*
  379. * Set the percentage of blocks at which to stop culling
  380. * - command: "brun <N>%"
  381. */
  382. static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
  383. {
  384. unsigned long brun;
  385. _enter(",%s", args);
  386. if (!*args)
  387. return -EINVAL;
  388. brun = simple_strtoul(args, &args, 10);
  389. if (args[0] != '%' || args[1] != '\0')
  390. return -EINVAL;
  391. if (brun <= cache->bcull_percent || brun >= 100)
  392. return cachefiles_daemon_range_error(cache, args);
  393. cache->brun_percent = brun;
  394. return 0;
  395. }
  396. /*
  397. * Set the percentage of blocks at which to start culling
  398. * - command: "bcull <N>%"
  399. */
  400. static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
  401. {
  402. unsigned long bcull;
  403. _enter(",%s", args);
  404. if (!*args)
  405. return -EINVAL;
  406. bcull = simple_strtoul(args, &args, 10);
  407. if (args[0] != '%' || args[1] != '\0')
  408. return -EINVAL;
  409. if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
  410. return cachefiles_daemon_range_error(cache, args);
  411. cache->bcull_percent = bcull;
  412. return 0;
  413. }
  414. /*
  415. * Set the percentage of blocks at which to stop allocating
  416. * - command: "bstop <N>%"
  417. */
  418. static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
  419. {
  420. unsigned long bstop;
  421. _enter(",%s", args);
  422. if (!*args)
  423. return -EINVAL;
  424. bstop = simple_strtoul(args, &args, 10);
  425. if (args[0] != '%' || args[1] != '\0')
  426. return -EINVAL;
  427. if (bstop >= cache->bcull_percent)
  428. return cachefiles_daemon_range_error(cache, args);
  429. cache->bstop_percent = bstop;
  430. return 0;
  431. }
  432. /*
  433. * Set the cache directory
  434. * - command: "dir <name>"
  435. */
  436. static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
  437. {
  438. char *dir;
  439. _enter(",%s", args);
  440. if (!*args) {
  441. pr_err("Empty directory specified\n");
  442. return -EINVAL;
  443. }
  444. if (cache->rootdirname) {
  445. pr_err("Second cache directory specified\n");
  446. return -EEXIST;
  447. }
  448. dir = kstrdup(args, GFP_KERNEL);
  449. if (!dir)
  450. return -ENOMEM;
  451. cache->rootdirname = dir;
  452. return 0;
  453. }
  454. /*
  455. * Set the cache security context
  456. * - command: "secctx <ctx>"
  457. */
  458. static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
  459. {
  460. char *secctx;
  461. _enter(",%s", args);
  462. if (!*args) {
  463. pr_err("Empty security context specified\n");
  464. return -EINVAL;
  465. }
  466. if (cache->secctx) {
  467. pr_err("Second security context specified\n");
  468. return -EINVAL;
  469. }
  470. secctx = kstrdup(args, GFP_KERNEL);
  471. if (!secctx)
  472. return -ENOMEM;
  473. cache->secctx = secctx;
  474. return 0;
  475. }
  476. /*
  477. * Set the cache tag
  478. * - command: "tag <name>"
  479. */
  480. static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
  481. {
  482. char *tag;
  483. _enter(",%s", args);
  484. if (!*args) {
  485. pr_err("Empty tag specified\n");
  486. return -EINVAL;
  487. }
  488. if (cache->tag)
  489. return -EEXIST;
  490. tag = kstrdup(args, GFP_KERNEL);
  491. if (!tag)
  492. return -ENOMEM;
  493. cache->tag = tag;
  494. return 0;
  495. }
  496. /*
  497. * Request a node in the cache be culled from the current working directory
  498. * - command: "cull <name>"
  499. */
  500. static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
  501. {
  502. struct path path;
  503. const struct cred *saved_cred;
  504. int ret;
  505. _enter(",%s", args);
  506. if (strchr(args, '/'))
  507. goto inval;
  508. if (!test_bit(CACHEFILES_READY, &cache->flags)) {
  509. pr_err("cull applied to unready cache\n");
  510. return -EIO;
  511. }
  512. if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
  513. pr_err("cull applied to dead cache\n");
  514. return -EIO;
  515. }
  516. get_fs_pwd(current->fs, &path);
  517. if (!d_can_lookup(path.dentry))
  518. goto notdir;
  519. cachefiles_begin_secure(cache, &saved_cred);
  520. ret = cachefiles_cull(cache, path.dentry, args);
  521. cachefiles_end_secure(cache, saved_cred);
  522. path_put(&path);
  523. _leave(" = %d", ret);
  524. return ret;
  525. notdir:
  526. path_put(&path);
  527. pr_err("cull command requires dirfd to be a directory\n");
  528. return -ENOTDIR;
  529. inval:
  530. pr_err("cull command requires dirfd and filename\n");
  531. return -EINVAL;
  532. }
  533. /*
  534. * Set debugging mode
  535. * - command: "debug <mask>"
  536. */
  537. static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
  538. {
  539. unsigned long mask;
  540. _enter(",%s", args);
  541. mask = simple_strtoul(args, &args, 0);
  542. if (args[0] != '\0')
  543. goto inval;
  544. cachefiles_debug = mask;
  545. _leave(" = 0");
  546. return 0;
  547. inval:
  548. pr_err("debug command requires mask\n");
  549. return -EINVAL;
  550. }
  551. /*
  552. * Find out whether an object in the current working directory is in use or not
  553. * - command: "inuse <name>"
  554. */
  555. static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
  556. {
  557. struct path path;
  558. const struct cred *saved_cred;
  559. int ret;
  560. //_enter(",%s", args);
  561. if (strchr(args, '/'))
  562. goto inval;
  563. if (!test_bit(CACHEFILES_READY, &cache->flags)) {
  564. pr_err("inuse applied to unready cache\n");
  565. return -EIO;
  566. }
  567. if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
  568. pr_err("inuse applied to dead cache\n");
  569. return -EIO;
  570. }
  571. get_fs_pwd(current->fs, &path);
  572. if (!d_can_lookup(path.dentry))
  573. goto notdir;
  574. cachefiles_begin_secure(cache, &saved_cred);
  575. ret = cachefiles_check_in_use(cache, path.dentry, args);
  576. cachefiles_end_secure(cache, saved_cred);
  577. path_put(&path);
  578. //_leave(" = %d", ret);
  579. return ret;
  580. notdir:
  581. path_put(&path);
  582. pr_err("inuse command requires dirfd to be a directory\n");
  583. return -ENOTDIR;
  584. inval:
  585. pr_err("inuse command requires dirfd and filename\n");
  586. return -EINVAL;
  587. }
  588. /*
  589. * Bind a directory as a cache
  590. */
  591. static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
  592. {
  593. _enter("{%u,%u,%u,%u,%u,%u},%s",
  594. cache->frun_percent,
  595. cache->fcull_percent,
  596. cache->fstop_percent,
  597. cache->brun_percent,
  598. cache->bcull_percent,
  599. cache->bstop_percent,
  600. args);
  601. if (cache->fstop_percent >= cache->fcull_percent ||
  602. cache->fcull_percent >= cache->frun_percent ||
  603. cache->frun_percent >= 100)
  604. return -ERANGE;
  605. if (cache->bstop_percent >= cache->bcull_percent ||
  606. cache->bcull_percent >= cache->brun_percent ||
  607. cache->brun_percent >= 100)
  608. return -ERANGE;
  609. if (!cache->rootdirname) {
  610. pr_err("No cache directory specified\n");
  611. return -EINVAL;
  612. }
  613. /* Don't permit already bound caches to be re-bound */
  614. if (test_bit(CACHEFILES_READY, &cache->flags)) {
  615. pr_err("Cache already bound\n");
  616. return -EBUSY;
  617. }
  618. if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
  619. if (!strcmp(args, "ondemand")) {
  620. set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
  621. } else if (*args) {
  622. pr_err("Invalid argument to the 'bind' command\n");
  623. return -EINVAL;
  624. }
  625. } else if (*args) {
  626. pr_err("'bind' command doesn't take an argument\n");
  627. return -EINVAL;
  628. }
  629. /* Make sure we have copies of the tag string */
  630. if (!cache->tag) {
  631. /*
  632. * The tag string is released by the fops->release()
  633. * function, so we don't release it on error here
  634. */
  635. cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
  636. if (!cache->tag)
  637. return -ENOMEM;
  638. }
  639. return cachefiles_add_cache(cache);
  640. }
  641. /*
  642. * Unbind a cache.
  643. */
  644. static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
  645. {
  646. _enter("");
  647. if (test_bit(CACHEFILES_READY, &cache->flags))
  648. cachefiles_withdraw_cache(cache);
  649. cachefiles_put_directory(cache->graveyard);
  650. cachefiles_put_directory(cache->store);
  651. mntput(cache->mnt);
  652. kfree(cache->rootdirname);
  653. kfree(cache->secctx);
  654. kfree(cache->tag);
  655. _leave("");
  656. }