cached_dir.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <[email protected]>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
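
/*
 * Look up the cached directory entry for @path in @cfids.
 *
 * Returns an existing entry with an extra reference if it is fully
 * cached (i.e. holds a lease).  If an entry exists but has no lease it
 * is either not yet fully cached or being torn down, and NULL is
 * returned.  If there is no entry, a new one is allocated and added to
 * the list unless @lookup_only is set or MAX_CACHED_FIDS was reached.
 */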
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
                                                    const char *path,
                                                    bool lookup_only)
{
        struct cached_fid *cfid;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry(cfid, &cfids->entries, entry) {
                if (!strcmp(cfid->path, path)) {
                        /*
                         * If it doesn't have a lease it is either not yet
                         * fully cached or it may be in the process of
                         * being deleted due to a lease break.
                         */
                        if (!cfid->has_lease) {
                                spin_unlock(&cfids->cfid_list_lock);
                                return NULL;
                        }
                        kref_get(&cfid->refcount);
                        spin_unlock(&cfids->cfid_list_lock);
                        return cfid;
                }
        }
        if (lookup_only) {
                spin_unlock(&cfids->cfid_list_lock);
                return NULL;
        }
        if (cfids->num_entries >= MAX_CACHED_FIDS) {
                spin_unlock(&cfids->cfid_list_lock);
                return NULL;
        }
        cfid = init_cached_dir(path);
        if (cfid == NULL) {
                spin_unlock(&cfids->cfid_list_lock);
                return NULL;
        }
        cfid->cfids = cfids;
        cfids->num_entries++;
        list_add(&cfid->entry, &cfids->entries);
        cfid->on_list = true;
        kref_get(&cfid->refcount);
        spin_unlock(&cfids->cfid_list_lock);
        return cfid;
}
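
/*
 * Walk @path one component at a time, starting at the root dentry of
 * @cifs_sb, and return the corresponding dentry with a reference held,
 * or an ERR_PTR() on failure.
 */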
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
        struct dentry *dentry;
        const char *s, *p;
        char sep;

        sep = CIFS_DIR_SEP(cifs_sb);
        dentry = dget(cifs_sb->root);
        s = path;

        do {
                struct inode *dir = d_inode(dentry);
                struct dentry *child;

                if (!S_ISDIR(dir->i_mode)) {
                        dput(dentry);
                        dentry = ERR_PTR(-ENOTDIR);
                        break;
                }

                /* skip separators */
                while (*s == sep)
                        s++;
                if (!*s)
                        break;
                p = s++;
                /* next separator */
                while (*s && *s != sep)
                        s++;

                child = lookup_positive_unlocked(p, dentry, s - p);
                dput(dentry);
                dentry = child;
        } while (!IS_ERR(dentry));
        return dentry;
}
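
/*
 * Return @path with the mount prefix path (cifs_sb->prepath) stripped,
 * or an ERR_PTR() if @path is shorter than the prefix.
 */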
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
                                  const char *path)
{
        size_t len = 0;

        if (!*path)
                return path;

        if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
            cifs_sb->prepath) {
                len = strlen(cifs_sb->prepath) + 1;
                if (unlikely(len > strlen(path)))
                        return ERR_PTR(-EINVAL);
        }
        return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                    const char *path,
                    struct cifs_sb_info *cifs_sb,
                    bool lookup_only, struct cached_fid **ret_cfid)
{
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;
        struct cifs_open_parms oparms;
        struct smb2_create_rsp *o_rsp = NULL;
        struct smb2_query_info_rsp *qi_rsp = NULL;
        int resp_buftype[2];
        struct smb_rqst rqst[2];
        struct kvec rsp_iov[2];
        struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
        struct kvec qi_iov[1];
        int rc, flags = 0;
        __le16 *utf16_path = NULL;
        u8 oplock = SMB2_OPLOCK_LEVEL_II;
        struct cifs_fid *pfid;
        struct dentry *dentry = NULL;
        struct cached_fid *cfid;
        struct cached_fids *cfids;
        const char *npath;

        if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
            is_smb1_server(tcon->ses->server))
                return -EOPNOTSUPP;

        ses = tcon->ses;
        server = ses->server;
        cfids = tcon->cfids;

        if (!server->ops->new_lease_key)
                return -EIO;

        if (cifs_sb->root == NULL)
                return -ENOENT;

        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;

        cfid = find_or_create_cached_dir(cfids, path, lookup_only);
        if (cfid == NULL) {
                kfree(utf16_path);
                return -ENOENT;
        }
        /*
         * At this point we either already have a lease, in which case we can
         * just return it, or we are guaranteed to be the only thread
         * accessing this cfid.
         */
        if (cfid->has_lease) {
                *ret_cfid = cfid;
                kfree(utf16_path);
                return 0;
        }

        /*
         * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
         * calling ->lookup() which already adds those through
         * build_path_from_dentry().  Also, do it earlier because we might
         * reconnect below when sending the compounded request and could then
         * end up with a different prefix path (e.g. after DFS failover).
         */
        npath = path_no_prefix(cifs_sb, path);
        if (IS_ERR(npath)) {
                rc = PTR_ERR(npath);
                kfree(utf16_path);
                return rc;
        }

        /*
         * We do not hold the lock during the open because SMB2_open
         * may need to reconnect.
         * This is safe because no other thread will be able to get a ref
         * to the cfid until we have finished opening the file and (possibly)
         * acquired a lease.
         */
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;

        pfid = &cfid->fid;
        server->ops->new_lease_key(pfid);

        memset(rqst, 0, sizeof(rqst));
        resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
        memset(rsp_iov, 0, sizeof(rsp_iov));

        /* Open */
        memset(&open_iov, 0, sizeof(open_iov));
        rqst[0].rq_iov = open_iov;
        rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
                .path = path,
                .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
                .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .fid = pfid,
        };

        rc = SMB2_open_init(tcon, server,
                            &rqst[0], &oplock, &oparms, utf16_path);
        if (rc)
                goto oshr_free;
        smb2_set_next_command(tcon, &rqst[0]);

        memset(&qi_iov, 0, sizeof(qi_iov));
        rqst[1].rq_iov = qi_iov;
        rqst[1].rq_nvec = 1;

        rc = SMB2_query_info_init(tcon, server,
                                  &rqst[1], COMPOUND_FID,
                                  COMPOUND_FID, FILE_ALL_INFORMATION,
                                  SMB2_O_INFO_FILE, 0,
                                  sizeof(struct smb2_file_all_info) +
                                  PATH_MAX * 2, 0, NULL);
        if (rc)
                goto oshr_free;

        smb2_set_related(&rqst[1]);

        rc = compound_send_recv(xid, ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
        if (rc) {
                if (rc == -EREMCHG) {
                        tcon->need_reconnect = true;
                        pr_warn_once("server share %s deleted\n",
                                     tcon->tree_name);
                }
                goto oshr_free;
        }

        cfid->tcon = tcon;
        cfid->is_open = true;

        o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
        oparms.fid->persistent_fid = o_rsp->PersistentFileId;
        oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
        oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

        if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
                goto oshr_free;

        smb2_parse_contexts(server, o_rsp,
                            &oparms.fid->epoch,
                            oparms.fid->lease_key, &oplock,
                            NULL, NULL);
        if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
                goto oshr_free;

        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
                goto oshr_free;
        if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
                                (char *)&cfid->file_all_info))
                cfid->file_all_info_is_valid = true;

        if (!npath[0])
                dentry = dget(cifs_sb->root);
        else {
                dentry = path_to_dentry(cifs_sb, npath);
                if (IS_ERR(dentry)) {
                        rc = -ENOENT;
                        goto oshr_free;
                }
        }
        cfid->dentry = dentry;
        cfid->time = jiffies;
        cfid->has_lease = true;

oshr_free:
        kfree(utf16_path);
        SMB2_open_free(&rqst[0]);
        SMB2_query_info_free(&rqst[1]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        spin_lock(&cfids->cfid_list_lock);
        if (rc && !cfid->has_lease) {
                if (cfid->on_list) {
                        list_del(&cfid->entry);
                        cfid->on_list = false;
                        cfids->num_entries--;
                }
                rc = -ENOENT;
        }
        spin_unlock(&cfids->cfid_list_lock);
        if (!rc && !cfid->has_lease) {
                /*
                 * We are guaranteed to have two references at this point.
                 * One for the caller and one for a potential lease.
                 * Release the lease ref so that the directory will be closed
                 * when the caller closes the cached handle.
                 */
                kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
        if (rc) {
                if (cfid->is_open)
                        SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
                                   cfid->fid.volatile_fid);
                free_cached_dir(cfid);
                cfid = NULL;
        }

        if (rc == 0) {
                *ret_cfid = cfid;
                atomic_inc(&tcon->num_remote_opens);
        }

        return rc;
}
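
/*
 * Look up an already cached directory handle by its dentry.  On success
 * an extra reference is taken and the entry is returned in @ret_cfid.
 */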
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
                              struct dentry *dentry,
                              struct cached_fid **ret_cfid)
{
        struct cached_fid *cfid;
        struct cached_fids *cfids = tcon->cfids;

        if (cfids == NULL)
                return -ENOENT;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry(cfid, &cfids->entries, entry) {
                if (dentry && cfid->dentry == dentry) {
                        cifs_dbg(FYI, "found a cached root file handle by dentry\n");
                        kref_get(&cfid->refcount);
                        *ret_cfid = cfid;
                        spin_unlock(&cfids->cfid_list_lock);
                        return 0;
                }
        }
        spin_unlock(&cfids->cfid_list_lock);
        return -ENOENT;
}
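
/*
 * kref release callback for a cached_fid: take it off the list, close
 * the directory handle on the server if it is still open, and free it.
 */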
static void
smb2_close_cached_fid(struct kref *ref)
{
        struct cached_fid *cfid = container_of(ref, struct cached_fid,
                                               refcount);

        spin_lock(&cfid->cfids->cfid_list_lock);
        if (cfid->on_list) {
                list_del(&cfid->entry);
                cfid->on_list = false;
                cfid->cfids->num_entries--;
        }
        spin_unlock(&cfid->cfids->cfid_list_lock);

        dput(cfid->dentry);
        cfid->dentry = NULL;

        if (cfid->is_open) {
                SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
                           cfid->fid.volatile_fid);
                atomic_dec(&cfid->tcon->num_remote_opens);
        }

        free_cached_dir(cfid);
}
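
/*
 * Drop any cached handle we may hold for directory @name, e.g. ahead of
 * an rmdir, so that our open handle does not keep the directory alive
 * on the server.
 */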
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
                             const char *name, struct cifs_sb_info *cifs_sb)
{
        struct cached_fid *cfid = NULL;
        int rc;

        rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
        if (rc) {
                cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
                return;
        }
        spin_lock(&cfid->cfids->cfid_list_lock);
        if (cfid->has_lease) {
                cfid->has_lease = false;
                kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
        spin_unlock(&cfid->cfids->cfid_list_lock);
        close_cached_dir(cfid);
}
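
/*
 * Drop a reference to a cached directory handle obtained from
 * open_cached_dir() or open_cached_dir_by_dentry().
 */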
void close_cached_dir(struct cached_fid *cfid)
{
        kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
        struct rb_root *root = &cifs_sb->tlink_tree;
        struct rb_node *node;
        struct cached_fid *cfid;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cached_fids *cfids;

        for (node = rb_first(root); node; node = rb_next(node)) {
                tlink = rb_entry(node, struct tcon_link, tl_rbnode);
                tcon = tlink_tcon(tlink);
                if (IS_ERR(tcon))
                        continue;
                cfids = tcon->cfids;
                if (cfids == NULL)
                        continue;
                list_for_each_entry(cfid, &cfids->entries, entry) {
                        dput(cfid->dentry);
                        cfid->dentry = NULL;
                }
        }
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
        struct cached_fids *cfids = tcon->cfids;
        struct cached_fid *cfid, *q;
        LIST_HEAD(entry);

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
                list_move(&cfid->entry, &entry);
                cfids->num_entries--;
                cfid->is_open = false;
                cfid->on_list = false;
                /* To prevent race with smb2_cached_lease_break() */
                kref_get(&cfid->refcount);
        }
        spin_unlock(&cfids->cfid_list_lock);

        list_for_each_entry_safe(cfid, q, &entry, entry) {
                list_del(&cfid->entry);
                cancel_work_sync(&cfid->lease_break);
                if (cfid->has_lease) {
                        /*
                         * The lease was never cancelled from the server,
                         * so we need to drop the reference.
                         */
                        spin_lock(&cfids->cfid_list_lock);
                        cfid->has_lease = false;
                        spin_unlock(&cfids->cfid_list_lock);
                        kref_put(&cfid->refcount, smb2_close_cached_fid);
                }
                /* Drop the extra reference taken above */
                kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
}
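
/*
 * Work handler queued by cached_dir_lease_break(): mark the lease as
 * gone and drop the reference that the lease was holding.
 */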
static void
smb2_cached_lease_break(struct work_struct *work)
{
        struct cached_fid *cfid = container_of(work,
                                               struct cached_fid, lease_break);

        spin_lock(&cfid->cfids->cfid_list_lock);
        cfid->has_lease = false;
        spin_unlock(&cfid->cfids->cfid_list_lock);
        kref_put(&cfid->refcount, smb2_close_cached_fid);
}
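
/*
 * Handle a lease break for a cached directory handle.  Returns true if
 * @lease_key matches one of our cached handles, in which case the entry
 * is removed from the list and its teardown is deferred to the cifsiod
 * workqueue.
 */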
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
        struct cached_fids *cfids = tcon->cfids;
        struct cached_fid *cfid;

        if (cfids == NULL)
                return false;

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry(cfid, &cfids->entries, entry) {
                if (cfid->has_lease &&
                    !memcmp(lease_key,
                            cfid->fid.lease_key,
                            SMB2_LEASE_KEY_SIZE)) {
                        cfid->time = 0;
                        /*
                         * We found a matching lease; remove it from the list
                         * so no other threads can access it.
                         */
                        list_del(&cfid->entry);
                        cfid->on_list = false;
                        cfids->num_entries--;

                        queue_work(cifsiod_wq,
                                   &cfid->lease_break);
                        spin_unlock(&cfids->cfid_list_lock);
                        return true;
                }
        }
        spin_unlock(&cfids->cfid_list_lock);
        return false;
}
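
/* Allocate and initialize a new cached_fid for @path. */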
static struct cached_fid *init_cached_dir(const char *path)
{
        struct cached_fid *cfid;

        cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
        if (!cfid)
                return NULL;
        cfid->path = kstrdup(path, GFP_ATOMIC);
        if (!cfid->path) {
                kfree(cfid);
                return NULL;
        }

        INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
        INIT_LIST_HEAD(&cfid->entry);
        INIT_LIST_HEAD(&cfid->dirents.entries);
        mutex_init(&cfid->dirents.de_mutex);
        spin_lock_init(&cfid->fid_lock);
        kref_init(&cfid->refcount);
        return cfid;
}
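
/* Release the dentry and all cached dirents, then free the cached_fid. */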
static void free_cached_dir(struct cached_fid *cfid)
{
        struct cached_dirent *dirent, *q;

        dput(cfid->dentry);
        cfid->dentry = NULL;

        /*
         * Delete all cached dirent names
         */
        list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
                list_del(&dirent->entry);
                kfree(dirent->name);
                kfree(dirent);
        }

        kfree(cfid->path);
        cfid->path = NULL;
        kfree(cfid);
}
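
/*
 * Allocate and initialize the per-tcon container for cached directory
 * handles.
 */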
struct cached_fids *init_cached_dirs(void)
{
        struct cached_fids *cfids;

        cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
        if (!cfids)
                return NULL;
        spin_lock_init(&cfids->cfid_list_lock);
        INIT_LIST_HEAD(&cfids->entries);
        return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
        struct cached_fid *cfid, *q;
        LIST_HEAD(entry);

        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
                cfid->on_list = false;
                cfid->is_open = false;
                list_move(&cfid->entry, &entry);
        }
        spin_unlock(&cfids->cfid_list_lock);

        list_for_each_entry_safe(cfid, q, &entry, entry) {
                list_del(&cfid->entry);
                free_cached_dir(cfid);
        }
        kfree(cfids);
}