/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <[email protected]>
 *  Garth Goodson   <[email protected]>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
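
/*
 * Readers walk the hash chains under rcu_read_lock(); all insertions into
 * and removals from nfs4_deviceid_cache are serialized by nfs4_deviceid_lock.
 */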

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif
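
/*
 * Hash the raw device ID bytes with a simple multiply-by-37 scheme and
 * mask the result down to an nfs4_deviceid_cache bucket index.
 */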
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}
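
/*
 * Scan one hash chain under RCU for a node matching the layout driver,
 * nfs_client and device ID. Entries whose reference count has already
 * dropped to zero are about to be freed and are skipped.
 */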
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}
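
/*
 * Issue GETDEVICEINFO to the server for @dev_id, sizing the reply buffer
 * from the session's maximum response size (capped by the layout driver's
 * max_deviceinfo_size, if set), and have the layout driver decode the
 * reply into a freshly allocated deviceid node.
 */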
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @server nfs_server associated with the deviceid
 * @id deviceid to look up
 * @hash hash bucket computed from @id
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		goto found;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new) {
		trace_nfs4_find_deviceid(server, id, -ENOENT);
		return new;
	}

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
	} else {
		atomic_inc(&new->ref);
		hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
		spin_unlock(&nfs4_deviceid_lock);
		d = new;
	}
found:
	trace_nfs4_find_deviceid(server, id, 0);
	return d;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
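
/*
 * Callers of nfs4_find_get_deviceid() (typically layout drivers resolving a
 * device ID carried in a layout) own a reference on the returned node and
 * must drop it with nfs4_put_deviceid_node() when done. An illustrative,
 * hypothetical caller might look roughly like:
 *
 *	node = nfs4_find_get_deviceid(server, &id, cred, GFP_KERNEL);
 *	if (!node)
 *		return -ENODEV;
 *	if (nfs4_test_deviceid_unavailable(node)) {
 *		nfs4_put_deviceid_node(node);
 *		return -ENODEV;
 *	}
 *	... use the device ...
 *	nfs4_put_deviceid_node(node);
 */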

/*
 * Remove a deviceid from the cache
 *
 * @ld layout driver associated with the deviceid
 * @clp nfs_client associated with the deviceid
 * @id the deviceid to unhash
 *
 * The node is unhashed and the cache's reference to it is dropped; the node
 * itself is freed once the last remaining reference is put.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in nfs4_init_deviceid_node */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
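
/*
 * Initialize a freshly allocated deviceid node. The initial reference set
 * here is the one owned by the device ID cache once the node is hashed;
 * nfs4_find_get_deviceid() takes an extra reference for each caller, and
 * nfs4_delete_deviceid() drops the cache's reference when the node is
 * unhashed.
 */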
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
		const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	trace_nfs4_deviceid_free(d->nfs_client, &d->deviceid);
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
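
/*
 * For NFS_DEVICEID_NOCACHE nodes the cache reference is not allowed to keep
 * the node alive on its own: once only the cache reference and a single
 * caller reference remain (d->ref == 2), the node is unhashed via
 * nfs4_delete_deviceid() and then freed when that last caller reference is
 * dropped.
 */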

void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);
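
/*
 * Mark a device ID as unavailable and record when that happened; the
 * barriers order the timestamp update with respect to setting
 * NFS_DEVICEID_UNAVAILABLE.
 */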
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);
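
/*
 * Report whether a device ID is currently unavailable. A device stays
 * unavailable for PNFS_DEVICE_RETRY_TIMEOUT jiffies after it was marked;
 * once that window has passed the flag is cleared and the device may be
 * retried.
 */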
bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);
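
/*
 * Unhash every deviceid node in one hash bucket that belongs to @clp,
 * collecting the nodes on a temporary list so the cache references can be
 * dropped after the locks are released.
 */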
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}
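
/*
 * Drop every cached deviceid associated with an nfs_client. Clients that
 * never negotiated pNFS (EXCHGID4_FLAG_USE_PNFS_MDS) are skipped.
 */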
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}