// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <[email protected]>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <[email protected]>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void	rpc_async_schedule(struct work_struct *);
static void	rpc_release_task(struct rpc_task *task);
static void	__rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);
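
/*
 * Select the allocation mask used on behalf of an RPC task.  When the
 * caller is a workqueue worker (rpciod/xprtiod), __GFP_NORETRY and
 * __GFP_NOWARN let the allocation fail fast and quietly rather than
 * stall the worker in memory reclaim; ordinary process context can use
 * plain GFP_KERNEL.
 */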
gfp_t rpc_task_gfp_mask(void)
{
	if (current->flags & PF_WQ_WORKER)
		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	return GFP_KERNEL;
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
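
/*
 * Record the first "fatal" RPC status for @task.  Returns true if this
 * call recorded @rpc_status, false if a status had already been set.
 */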
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
{
	if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
		return true;
	return false;
}

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;

	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Might be a task carrying a reverse-direction operation */
	if (!clnt) {
		static atomic_t rpc_pid;

		task->tk_pid = atomic_inc_return(&rpc_pid);
		return;
	}

	task->tk_pid = atomic_inc_return(&clnt->cl_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
}
EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);
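
/*
 * Typical usage (illustrative sketch only; rpc_run_task() lives in
 * clnt.c and error handling is abbreviated):
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	status = rpc_wait_for_completion_task(task);
 *	if (status == 0)
 *		status = task->tk_status;
 *	rpc_put_task(task);
 *
 * The reference returned by rpc_run_task() is what keeps the rpc_task
 * alive across the wait; rpc_put_task() drops it.
 */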
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
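
/*
 * Illustrative sketch only (function names are hypothetical): the
 * rpc_sleep_on*() helpers are normally called from inside a task's
 * tk_action state, which queues the task and then simply returns, e.g.
 *
 *	static void frob_prepare(struct rpc_task *task)
 *	{
 *		if (!frob_resource_ready()) {
 *			rpc_sleep_on(&frob_waitq, task, NULL);
 *			return;
 *		}
 *		task->tk_action = frob_transmit;
 *	}
 *
 * A non-NULL @action is stored in tk_callback and runs exactly once, in
 * preference to tk_action, the next time __rpc_execute() picks the task
 * up after it has been woken (for example by rpc_wake_up_queued_task()
 * or rpc_wake_up_first()).
 */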
/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
					  struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
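
/*
 * Illustrative sketch only: a tk_action callback that wants to back off
 * and retry later typically does something like
 *
 *	task->tk_action = call_retry;	// hypothetical next state
 *	rpc_delay(task, 3 * HZ);	// re-run in roughly three seconds
 *	return;
 *
 * When the delay expires, the delay queue timer marks the task
 * -ETIMEDOUT and wakes it; __rpc_atrun() then clears that status so the
 * task resumes cleanly in its next action.
 */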
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}
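
/*
 * Mark @task as signalled (-ERESTARTSYS) and, if it is currently asleep
 * on a wait queue, wake it so that __rpc_execute() routes it straight
 * into rpc_exit_task().
 */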
void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
		return;
	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task(queue, task);
}

void rpc_task_try_cancel(struct rpc_task *task, int error)
{
	struct rpc_wait_queue *queue;

	if (!rpc_task_set_rpc_status(task, error))
		return;
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task(queue, task);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
{
	if (!xprt)
		return false;
	if (!atomic_read(&xprt->swapper))
		return false;
	return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;
	unsigned long pflags = current->flags;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 */
		do_action = task->tk_action;
		/* Tasks with an RPC error status should exit */
		if (do_action && do_action != rpc_exit_task &&
		    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
			task->tk_status = status;
			do_action = rpc_exit_task;
		}
		/* Callbacks override all actions */
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		if (RPC_IS_SWAPPER(task) ||
		    xprt_needs_memalloc(task->tk_xprt, task))
			current->flags |= PF_MEMALLOC;

		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task)) {
			cond_resched();
			continue;
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		/* Wake up any task that has an exit status */
		if (READ_ONCE(task->tk_rpc_status) != 0) {
			rpc_wake_up_task_queue_locked(queue, task);
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			goto out;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE|TASK_FREEZABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			rpc_signal_task(task);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
out:
	current_restore_flags(pflags, PF_MEMALLOC);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async) {
		unsigned int pflags = memalloc_nofs_save();
		__rpc_execute(task);
		memalloc_nofs_restore(pflags);
	}
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = rpc_task_gfp_mask();

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE) {
		buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
		/* Reach for the mempool if dynamic allocation fails */
		if (!buf && RPC_IS_ASYNC(task))
			buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
	} else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
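
/*
 * Resulting layout of the region allocated above (informational sketch):
 *
 *	struct rpc_buffer
 *	+-----------+---------------------------+---------------------------+
 *	| len, ...  | rq_buffer                 | rq_rbuffer                |
 *	|           | (rq_callsize bytes: the   | (rq_rcvsize bytes: the    |
 *	|           |  encoded RPC call)        |  received RPC reply)      |
 *	+-----------+---------------------------+---------------------------+
 *	            ^-- buf->data               ^-- buf->data + rq_callsize
 */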
/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

static struct rpc_task *rpc_alloc_task(void)
{
	struct rpc_task *task;

	task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
	if (task)
		return task;
	return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					     setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC)
		mempool_free(task, rpc_task_mempool);
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}
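
/*
 * Drop a reference to @task.  The final put releases the task's
 * resources; rpc_put_task() frees it synchronously, while
 * rpc_put_task_async() defers rpc_free_task() to the task's
 * tk_workqueue (see the note above rpc_free_task() for why that can
 * matter).
 */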
void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}