  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* AFS fileserver probing
  3. *
  4. * Copyright (C) 2018, 2020 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells ([email protected])
  6. */
  7. #include <linux/sched.h>
  8. #include <linux/slab.h>
  9. #include "afs_fs.h"
  10. #include "internal.h"
  11. #include "protocol_afs.h"
  12. #include "protocol_yfs.h"
/* Polling intervals: non-responding servers are reprobed on the fast
 * interval; responding servers are reprobed on the slow interval (which also
 * serves to keep NAT mappings alive — see afs_fs_probe_dispatcher()).
 */
static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
static unsigned int afs_fs_probe_slow_poll_interval = 5 * 60 * HZ;
/*
 * Start the probe polling timer.  We have to supply it with an inc on the
 * outstanding server count.
 */
static void afs_schedule_fs_probe(struct afs_net *net,
				  struct afs_server *server, bool fast)
{
	unsigned long atj;

	/* Don't rearm the timer if the namespace is being torn down. */
	if (!net->live)
		return;

	/* The next probe is due one poll interval after this round began. */
	atj = server->probed_at;
	atj += fast ? afs_fs_probe_fast_poll_interval : afs_fs_probe_slow_poll_interval;

	/* An armed timer holds a count on servers_outstanding.  If
	 * timer_reduce() says the timer was already pending (so already
	 * holding a count), drop the one we just took.
	 */
	afs_inc_servers_outstanding(net);
	if (timer_reduce(&net->fs_probe_timer, atj))
		afs_dec_servers_outstanding(net);
}
/*
 * Handle the completion of a set of probes.  Requeue the server for its next
 * round of probing: responders go on the slow-poll queue, non-responders on
 * the fast-poll queue.
 */
static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server)
{
	bool responded = server->probe.responded;

	write_seqlock(&net->fs_lock);
	if (responded) {
		list_add_tail(&server->probe_link, &net->fs_probe_slow);
	} else {
		/* No address responded: mark the server as not usable for
		 * selection until a future probe succeeds.
		 */
		server->rtt = UINT_MAX;
		clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
		list_add_tail(&server->probe_link, &net->fs_probe_fast);
	}
	write_sequnlock(&net->fs_lock);

	/* Non-responders get reprobed on the fast interval. */
	afs_schedule_fs_probe(net, server, !responded);
}
/*
 * Handle the completion of a probe.
 */
static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server)
{
	_enter("");

	/* When the last probe of the batch completes, requeue the server and
	 * schedule the next round.
	 */
	if (atomic_dec_and_test(&server->probe_outstanding))
		afs_finished_fs_probe(net, server);

	/* Wake waiters in afs_wait_for_fs_probes() and
	 * afs_wait_for_one_fs_probe().
	 */
	wake_up_all(&server->probe_wq);
}
/*
 * Handle inability to send a probe due to ENOMEM when trying to allocate a
 * call struct.
 */
static void afs_fs_probe_not_done(struct afs_net *net,
				  struct afs_server *server,
				  struct afs_addr_cursor *ac)
{
	struct afs_addr_list *alist = ac->alist;
	unsigned int index = ac->index;

	_enter("");

	trace_afs_io_error(0, -ENOMEM, afs_io_error_fs_probe_fail);
	spin_lock(&server->probe_lock);

	/* Record a local (non-network) failure, keeping the first error seen
	 * for this probe round.
	 */
	server->probe.local_failure = true;
	if (server->probe.error == 0)
		server->probe.error = -ENOMEM;

	/* Mark this address failed in the address list. */
	set_bit(index, &alist->failed);

	spin_unlock(&server->probe_lock);

	/* Still counts towards probe_outstanding completion. */
	return afs_done_one_fs_probe(net, server);
}
/*
 * Process the result of probing a fileserver.  This is called after successful
 * or failed delivery of an FS.GetCapabilities operation.
 */
void afs_fileserver_probe_result(struct afs_call *call)
{
	struct afs_addr_list *alist = call->alist;
	struct afs_server *server = call->server;
	unsigned int index = call->addr_ix;
	unsigned int rtt_us = 0, cap0;
	int ret = call->error;

	_enter("%pU,%u", &server->uuid, index);

	spin_lock(&server->probe_lock);

	switch (ret) {
	case 0:
		server->probe.error = 0;
		goto responded;
	case -ECONNABORTED:
		/* An abort is still a response from the server; only record
		 * the abort details if no address has responded yet.
		 */
		if (!server->probe.responded) {
			server->probe.abort_code = call->abort_code;
			server->probe.error = ret;
		}
		goto responded;
	case -ENOMEM:
	case -ENONET:
		/* Local failure: don't count it against the address itself. */
		clear_bit(index, &alist->responded);
		server->probe.local_failure = true;
		trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
		goto out;
	case -ECONNRESET: /* Responded, but call expired. */
	case -ERFKILL:
	case -EADDRNOTAVAIL:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EHOSTDOWN:
	case -ECONNREFUSED:
	case -ETIMEDOUT:
	case -ETIME:
	default:
		/* Network failure: mark the address failed and keep the most
		 * informative error (anything more specific overwrites a
		 * previously recorded timeout).
		 */
		clear_bit(index, &alist->responded);
		set_bit(index, &alist->failed);
		if (!server->probe.responded &&
		    (server->probe.error == 0 ||
		     server->probe.error == -ETIMEDOUT ||
		     server->probe.error == -ETIME))
			server->probe.error = ret;
		trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
		goto out;
	}

responded:
	clear_bit(index, &alist->failed);

	/* Record which service variant answered on this address; the YFS
	 * variant takes precedence if any address speaks it.
	 */
	if (call->service_id == YFS_FS_SERVICE) {
		server->probe.is_yfs = true;
		set_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
		alist->addrs[index].srx_service = call->service_id;
	} else {
		server->probe.not_yfs = true;
		if (!server->probe.is_yfs) {
			clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
			alist->addrs[index].srx_service = call->service_id;
		}
		/* Non-YFS servers advertise 64-bit file support via the
		 * capabilities word.
		 */
		cap0 = ntohl(call->tmp);
		if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
			set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
		else
			clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
	}

	/* Prefer whichever address has the lowest smoothed RTT so far. */
	rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
	if (rtt_us < server->probe.rtt) {
		server->probe.rtt = rtt_us;
		server->rtt = rtt_us;
		alist->preferred = index;
	}

	smp_wmb(); /* Set rtt before responded. */
	server->probe.responded = true;
	set_bit(index, &alist->responded);
	set_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
out:
	spin_unlock(&server->probe_lock);

	_debug("probe %pU [%u] %pISpc rtt=%u ret=%d",
	       &server->uuid, index, &alist->addrs[index].transport,
	       rtt_us, ret);

	return afs_done_one_fs_probe(call->net, server);
}
/*
 * Probe one or all of a fileserver's addresses to find out the best route and
 * to query its capabilities.
 */
void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
			     struct key *key, bool all)
{
	struct afs_addr_cursor ac = {
		.index = 0,
	};

	_enter("%pU", &server->uuid);

	/* Pin the current address list so it can't be swapped out under us. */
	read_lock(&server->fs_lock);
	ac.alist = rcu_dereference_protected(server->addresses,
					     lockdep_is_held(&server->fs_lock));
	afs_get_addrlist(ac.alist);
	read_unlock(&server->fs_lock);

	/* Reset the probe state for this round before any calls are issued. */
	server->probed_at = jiffies;
	atomic_set(&server->probe_outstanding, all ? ac.alist->nr_addrs : 1);
	memset(&server->probe, 0, sizeof(server->probe));
	server->probe.rtt = UINT_MAX;

	/* If the preferred address index is out of range, fall back to
	 * probing every address.
	 */
	ac.index = ac.alist->preferred;
	if (ac.index < 0 || ac.index >= ac.alist->nr_addrs)
		all = true;

	/* If a call can't be allocated, account the probe as done so the
	 * outstanding count still drains.
	 */
	if (all) {
		for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++)
			if (!afs_fs_get_capabilities(net, server, &ac, key))
				afs_fs_probe_not_done(net, server, &ac);
	} else {
		if (!afs_fs_get_capabilities(net, server, &ac, key))
			afs_fs_probe_not_done(net, server, &ac);
	}

	afs_put_addrlist(ac.alist);
}
/*
 * Wait for the first as-yet untried fileserver to respond.  Returns 0 on
 * success or no-op, -ENOMEM on allocation failure or -ERESTARTSYS if
 * interrupted before anything responded.  On success, slist->preferred may be
 * updated to the lowest-RTT responder.
 */
int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
{
	struct wait_queue_entry *waits;
	struct afs_server *server;
	unsigned int rtt = UINT_MAX, rtt_s;
	bool have_responders = false;
	int pref = -1, i;

	_enter("%u,%lx", slist->nr_servers, untried);

	/* Only wait for servers that have a probe outstanding. */
	for (i = 0; i < slist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = slist->servers[i].server;
			if (!atomic_read(&server->probe_outstanding))
				__clear_bit(i, &untried);
			if (server->probe.responded)
				have_responders = true;
		}
	}
	/* Nothing to wait for if someone already responded or no probes are
	 * in flight.
	 */
	if (have_responders || !untried)
		return 0;

	waits = kmalloc(array_size(slist->nr_servers, sizeof(*waits)), GFP_KERNEL);
	if (!waits)
		return -ENOMEM;

	/* Hook a wait entry onto each untried server's probe waitqueue. */
	for (i = 0; i < slist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = slist->servers[i].server;
			init_waitqueue_entry(&waits[i], current);
			add_wait_queue(&server->probe_wq, &waits[i]);
		}
	}

	for (;;) {
		bool still_probing = false;

		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0; i < slist->nr_servers; i++) {
			if (test_bit(i, &untried)) {
				server = slist->servers[i].server;
				if (server->probe.responded)
					goto stop;
				if (atomic_read(&server->probe_outstanding))
					still_probing = true;
			}
		}

		/* Stop when all probes have concluded or on a signal. */
		if (!still_probing || signal_pending(current))
			goto stop;
		schedule();
	}

stop:
	set_current_state(TASK_RUNNING);

	/* Pick the responding server with the lowest RTT whilst unhooking our
	 * wait entries.
	 */
	for (i = 0; i < slist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = slist->servers[i].server;
			rtt_s = READ_ONCE(server->rtt);
			if (test_bit(AFS_SERVER_FL_RESPONDING, &server->flags) &&
			    rtt_s < rtt) {
				pref = i;
				rtt = rtt_s;
			}

			remove_wait_queue(&server->probe_wq, &waits[i]);
		}
	}

	kfree(waits);

	/* Only report a signal if nothing responded. */
	if (pref == -1 && signal_pending(current))
		return -ERESTARTSYS;

	if (pref >= 0)
		slist->preferred = pref;
	return 0;
}
  265. /*
  266. * Probe timer. We have an increment on fs_outstanding that we need to pass
  267. * along to the work item.
  268. */
  269. void afs_fs_probe_timer(struct timer_list *timer)
  270. {
  271. struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);
  272. if (!net->live || !queue_work(afs_wq, &net->fs_prober))
  273. afs_dec_servers_outstanding(net);
  274. }
/*
 * Dispatch a probe to a server.  Called with net->fs_lock write-locked;
 * drops it, as annotated by __releases().
 */
static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server, bool all)
	__releases(&net->fs_lock)
{
	struct key *key = NULL;	/* Probes are issued without a key. */

	/* We remove it from the queues here - it will be added back to
	 * one of the queues on the completion of the probe.
	 */
	list_del_init(&server->probe_link);

	/* Hold a server ref across the probe; the caller's lock is dropped
	 * before issuing the calls.
	 */
	afs_get_server(server, afs_server_trace_get_probe);
	write_sequnlock(&net->fs_lock);

	afs_fs_probe_fileserver(net, server, key, all);
	afs_put_server(net, server, afs_server_trace_put_probe);
}
/*
 * Probe a server immediately without waiting for its due time to come
 * round.  This is used when all of the addresses have been tried.
 */
void afs_probe_fileserver(struct afs_net *net, struct afs_server *server)
{
	write_seqlock(&net->fs_lock);

	/* Only dispatch if the server is currently queued for probing.  Note
	 * that afs_dispatch_fs_probe() drops fs_lock for us on that path.
	 */
	if (!list_empty(&server->probe_link))
		return afs_dispatch_fs_probe(net, server, true);
	write_sequnlock(&net->fs_lock);
}
/*
 * Probe dispatcher to regularly dispatch probes to keep NAT alive.  The work
 * item carries a count on net->servers_outstanding that must be dropped or
 * handed on along every exit path.
 */
void afs_fs_probe_dispatcher(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, fs_prober);
	struct afs_server *fast, *slow, *server;
	unsigned long nowj, timer_at, poll_at;
	bool first_pass = true, set_timer = false;

	if (!net->live) {
		afs_dec_servers_outstanding(net);
		return;
	}

	_enter("");

	if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) {
		afs_dec_servers_outstanding(net);
		_leave(" [none]");
		return;
	}

again:
	write_seqlock(&net->fs_lock);

	fast = slow = server = NULL;
	nowj = jiffies;
	timer_at = nowj + MAX_JIFFY_OFFSET;

	/* Look at the oldest entry on each queue; if it isn't due yet, note
	 * when the timer should next fire instead.
	 */
	if (!list_empty(&net->fs_probe_fast)) {
		fast = list_first_entry(&net->fs_probe_fast, struct afs_server, probe_link);
		poll_at = fast->probed_at + afs_fs_probe_fast_poll_interval;
		if (time_before(nowj, poll_at)) {
			timer_at = poll_at;
			set_timer = true;
			fast = NULL;
		}
	}

	if (!list_empty(&net->fs_probe_slow)) {
		slow = list_first_entry(&net->fs_probe_slow, struct afs_server, probe_link);
		poll_at = slow->probed_at + afs_fs_probe_slow_poll_interval;
		if (time_before(nowj, poll_at)) {
			if (time_before(poll_at, timer_at))
				timer_at = poll_at;
			set_timer = true;
			slow = NULL;
		}
	}

	/* A due fast-queue entry takes precedence over a slow-queue one. */
	server = fast ?: slow;
	if (server)
		_debug("probe %pU", &server->uuid);

	if (server && (first_pass || !need_resched())) {
		/* afs_dispatch_fs_probe() drops fs_lock for us. */
		afs_dispatch_fs_probe(net, server, server == fast);
		first_pass = false;
		goto again;
	}

	write_sequnlock(&net->fs_lock);

	if (server) {
		/* More work is due but we need to resched: requeue ourselves,
		 * passing our count along unless queueing fails.
		 */
		if (!queue_work(afs_wq, &net->fs_prober))
			afs_dec_servers_outstanding(net);
		_leave(" [requeue]");
	} else if (set_timer) {
		/* Hand the count to the timer unless it was already pending
		 * (and thus already holds one).
		 */
		if (timer_reduce(&net->fs_probe_timer, timer_at))
			afs_dec_servers_outstanding(net);
		_leave(" [timer]");
	} else {
		afs_dec_servers_outstanding(net);
		_leave(" [quiesce]");
	}
}
/*
 * Wait for a probe on a particular fileserver to complete for 2s.  Returns 0
 * if the server responded, -ERESTARTSYS if interrupted (when is_intr),
 * -ETIME on timeout and -EDESTADDRREQ if the probes finished with no address
 * responding.
 */
int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr)
{
	struct wait_queue_entry wait;
	unsigned long timo = 2 * HZ;

	if (atomic_read(&server->probe_outstanding) == 0)
		goto dont_wait;

	init_wait_entry(&wait, 0);
	for (;;) {
		prepare_to_wait_event(&server->probe_wq, &wait,
				      is_intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		/* Stop on timeout, a response, probe completion or
		 * (optionally) a signal.
		 */
		if (timo == 0 ||
		    server->probe.responded ||
		    atomic_read(&server->probe_outstanding) == 0 ||
		    (is_intr && signal_pending(current)))
			break;
		timo = schedule_timeout(timo);
	}

	finish_wait(&server->probe_wq, &wait);

dont_wait:
	/* A response takes precedence over signal/timeout. */
	if (server->probe.responded)
		return 0;
	if (is_intr && signal_pending(current))
		return -ERESTARTSYS;
	if (timo == 0)
		return -ETIME;
	return -EDESTADDRREQ;
}
  397. /*
  398. * Clean up the probing when the namespace is killed off.
  399. */
  400. void afs_fs_probe_cleanup(struct afs_net *net)
  401. {
  402. if (del_timer_sync(&net->fs_probe_timer))
  403. afs_dec_servers_outstanding(net);
  404. }