kthread.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Kernel thread helper functions.
  3. * Copyright (C) 2004 IBM Corporation, Rusty Russell.
  4. * Copyright (C) 2009 Red Hat, Inc.
  5. *
  6. * Creation is done via kthreadd, so that we get a clean environment
  7. * even if we're invoked from userspace (think modprobe, hotplug cpu,
  8. * etc.).
  9. */
  10. #include <uapi/linux/sched/types.h>
  11. #include <linux/mm.h>
  12. #include <linux/mmu_context.h>
  13. #include <linux/sched.h>
  14. #include <linux/sched/mm.h>
  15. #include <linux/sched/task.h>
  16. #include <linux/kthread.h>
  17. #include <linux/completion.h>
  18. #include <linux/err.h>
  19. #include <linux/cgroup.h>
  20. #include <linux/cpuset.h>
  21. #include <linux/unistd.h>
  22. #include <linux/file.h>
  23. #include <linux/export.h>
  24. #include <linux/mutex.h>
  25. #include <linux/slab.h>
  26. #include <linux/freezer.h>
  27. #include <linux/ptrace.h>
  28. #include <linux/uaccess.h>
  29. #include <linux/numa.h>
  30. #include <linux/sched/isolation.h>
  31. #include <trace/events/sched.h>
  32. static DEFINE_SPINLOCK(kthread_create_lock);
  33. static LIST_HEAD(kthread_create_list);
  34. struct task_struct *kthreadd_task;
  35. struct kthread_create_info
  36. {
  37. /* Information passed to kthread() from kthreadd. */
  38. int (*threadfn)(void *data);
  39. void *data;
  40. int node;
  41. /* Result passed back to kthread_create() from kthreadd. */
  42. struct task_struct *result;
  43. struct completion *done;
  44. struct list_head list;
  45. };
  46. struct kthread {
  47. unsigned long flags;
  48. unsigned int cpu;
  49. int result;
  50. int (*threadfn)(void *);
  51. void *data;
  52. struct completion parked;
  53. struct completion exited;
  54. #ifdef CONFIG_BLK_CGROUP
  55. struct cgroup_subsys_state *blkcg_css;
  56. #endif
  57. /* To store the full name if task comm is truncated. */
  58. char *full_name;
  59. };
  60. enum KTHREAD_BITS {
  61. KTHREAD_IS_PER_CPU = 0,
  62. KTHREAD_SHOULD_STOP,
  63. KTHREAD_SHOULD_PARK,
  64. };
  65. static inline struct kthread *to_kthread(struct task_struct *k)
  66. {
  67. WARN_ON(!(k->flags & PF_KTHREAD));
  68. return k->worker_private;
  69. }
  70. /*
  71. * Variant of to_kthread() that doesn't assume @p is a kthread.
  72. *
  73. * Per construction; when:
  74. *
  75. * (p->flags & PF_KTHREAD) && p->worker_private
  76. *
  77. * the task is both a kthread and struct kthread is persistent. However
  78. * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
  79. * begin_new_exec()).
  80. */
  81. static inline struct kthread *__to_kthread(struct task_struct *p)
  82. {
  83. void *kthread = p->worker_private;
  84. if (kthread && !(p->flags & PF_KTHREAD))
  85. kthread = NULL;
  86. return kthread;
  87. }
  88. void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
  89. {
  90. struct kthread *kthread = to_kthread(tsk);
  91. if (!kthread || !kthread->full_name) {
  92. __get_task_comm(buf, buf_size, tsk);
  93. return;
  94. }
  95. strscpy_pad(buf, kthread->full_name, buf_size);
  96. }
  97. bool set_kthread_struct(struct task_struct *p)
  98. {
  99. struct kthread *kthread;
  100. if (WARN_ON_ONCE(to_kthread(p)))
  101. return false;
  102. kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
  103. if (!kthread)
  104. return false;
  105. init_completion(&kthread->exited);
  106. init_completion(&kthread->parked);
  107. p->vfork_done = &kthread->exited;
  108. p->worker_private = kthread;
  109. return true;
  110. }
  111. void free_kthread_struct(struct task_struct *k)
  112. {
  113. struct kthread *kthread;
  114. /*
  115. * Can be NULL if kzalloc() in set_kthread_struct() failed.
  116. */
  117. kthread = to_kthread(k);
  118. if (!kthread)
  119. return;
  120. #ifdef CONFIG_BLK_CGROUP
  121. WARN_ON_ONCE(kthread->blkcg_css);
  122. #endif
  123. k->worker_private = NULL;
  124. kfree(kthread->full_name);
  125. kfree(kthread);
  126. }
  127. /**
  128. * kthread_should_stop - should this kthread return now?
  129. *
  130. * When someone calls kthread_stop() on your kthread, it will be woken
  131. * and this will return true. You should then return, and your return
  132. * value will be passed through to kthread_stop().
  133. */
  134. bool kthread_should_stop(void)
  135. {
  136. return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
  137. }
  138. EXPORT_SYMBOL(kthread_should_stop);
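/*
 * Editor's illustrative sketch (not part of upstream kthread.c): the
 * canonical consumer of kthread_should_stop(). The thread sleeps until it
 * is woken, rechecks the stop condition, and returns a value that
 * kthread_stop() hands back to the creator. All names are hypothetical.
 */
static int example_stop_aware_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* do one unit of work here, then sleep until woken */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        }
        return 0;       /* this value is returned by kthread_stop() */
}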
  139. bool __kthread_should_park(struct task_struct *k)
  140. {
  141. return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
  142. }
  143. EXPORT_SYMBOL_GPL(__kthread_should_park);
  144. /**
  145. * kthread_should_park - should this kthread park now?
  146. *
  147. * When someone calls kthread_park() on your kthread, it will be woken
  148. * and this will return true. You should then do the necessary
  149. * cleanup and call kthread_parkme()
  150. *
  151. * Similar to kthread_should_stop(), but this keeps the thread alive
  152. * and in a park position. kthread_unpark() "restarts" the thread and
  153. * calls the thread function again.
  154. */
  155. bool kthread_should_park(void)
  156. {
  157. return __kthread_should_park(current);
  158. }
  159. EXPORT_SYMBOL_GPL(kthread_should_park);
  160. /**
  161. * kthread_freezable_should_stop - should this freezable kthread return now?
  162. * @was_frozen: optional out parameter, indicates whether %current was frozen
  163. *
  164. * kthread_should_stop() for freezable kthreads, which will enter
  165. * refrigerator if necessary. This function is safe from kthread_stop() /
  166. * freezer deadlock and freezable kthreads should use this function instead
  167. * of calling try_to_freeze() directly.
  168. */
  169. bool kthread_freezable_should_stop(bool *was_frozen)
  170. {
  171. bool frozen = false;
  172. might_sleep();
  173. if (unlikely(freezing(current)))
  174. frozen = __refrigerator(true);
  175. if (was_frozen)
  176. *was_frozen = frozen;
  177. return kthread_should_stop();
  178. }
  179. EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
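/*
 * Editor's illustrative sketch: a freezable kthread loop. It calls
 * kthread_freezable_should_stop() rather than try_to_freeze() directly, so
 * it cannot deadlock against kthread_stop() while frozen. Names are
 * hypothetical; a real user decides what "one unit of work" means.
 */
static int example_freezable_fn(void *data)
{
        bool was_frozen;

        set_freezable();
        while (!kthread_freezable_should_stop(&was_frozen)) {
                if (was_frozen)
                        continue;       /* revalidate state after a freeze */
                /* do one unit of work, then sleep until woken */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop() && !freezing(current))
                        schedule();
                __set_current_state(TASK_RUNNING);
        }
        return 0;
}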
  180. /**
  181. * kthread_func - return the function specified on kthread creation
  182. * @task: kthread task in question
  183. *
  184. * Returns NULL if the task is not a kthread.
  185. */
  186. void *kthread_func(struct task_struct *task)
  187. {
  188. struct kthread *kthread = __to_kthread(task);
  189. if (kthread)
  190. return kthread->threadfn;
  191. return NULL;
  192. }
  193. EXPORT_SYMBOL_GPL(kthread_func);
  194. /**
  195. * kthread_data - return data value specified on kthread creation
  196. * @task: kthread task in question
  197. *
  198. * Return the data value specified when kthread @task was created.
  199. * The caller is responsible for ensuring the validity of @task when
  200. * calling this function.
  201. */
  202. void *kthread_data(struct task_struct *task)
  203. {
  204. return to_kthread(task)->data;
  205. }
  206. EXPORT_SYMBOL_GPL(kthread_data);
  207. /**
  208. * kthread_probe_data - speculative version of kthread_data()
  209. * @task: possible kthread task in question
  210. *
  211. * @task could be a kthread task. Return the data value specified when it
  212. * was created if accessible. If @task isn't a kthread task or its data is
  213. * inaccessible for any reason, %NULL is returned. This function requires
  214. * that @task itself is safe to dereference.
  215. */
  216. void *kthread_probe_data(struct task_struct *task)
  217. {
  218. struct kthread *kthread = __to_kthread(task);
  219. void *data = NULL;
  220. if (kthread)
  221. copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
  222. return data;
  223. }
  224. static void __kthread_parkme(struct kthread *self)
  225. {
  226. for (;;) {
  227. /*
  228. * TASK_PARKED is a special state; we must serialize against
  229. * possible pending wakeups to avoid store-store collisions on
  230. * task->state.
  231. *
  232. * Such a collision might possibly result in the task state
  233. * changing from TASK_PARKED and us failing the
  234. * wait_task_inactive() in kthread_park().
  235. */
  236. set_special_state(TASK_PARKED);
  237. if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
  238. break;
  239. /*
  240. * Thread is going to call schedule(), do not preempt it,
  241. * or the caller of kthread_park() may spend more time in
  242. * wait_task_inactive().
  243. */
  244. preempt_disable();
  245. complete(&self->parked);
  246. schedule_preempt_disabled();
  247. preempt_enable();
  248. }
  249. __set_current_state(TASK_RUNNING);
  250. }
  251. void kthread_parkme(void)
  252. {
  253. __kthread_parkme(to_kthread(current));
  254. }
  255. EXPORT_SYMBOL_GPL(kthread_parkme);
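/*
 * Editor's illustrative sketch: a thread function that honours both park
 * and stop requests, in the style of per-CPU/smpboot threads.
 * kthread_parkme() blocks in TASK_PARKED until kthread_unpark() is called.
 */
static int example_parkable_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        kthread_parkme();
                        continue;       /* re-check stop/park after unpark */
                }
                /* do one unit of work, then sleep until woken */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop() && !kthread_should_park())
                        schedule();
                __set_current_state(TASK_RUNNING);
        }
        return 0;
}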
  256. /**
  257. * kthread_exit - Cause the current kthread to return @result to kthread_stop().
  258. * @result: The integer value to return to kthread_stop().
  259. *
  260. * While kthread_exit can be called directly, it exists so that
  261. * functions which do some additional work in non-modular code such as
  262. * module_put_and_kthread_exit can be implemented.
  263. *
  264. * Does not return.
  265. */
  266. void __noreturn kthread_exit(long result)
  267. {
  268. struct kthread *kthread = to_kthread(current);
  269. kthread->result = result;
  270. do_exit(0);
  271. }
  272. /**
  273. * kthread_complete_and_exit - Exit the current kthread.
  274. * @comp: Completion to complete
  275. * @code: The integer value to return to kthread_stop().
  276. *
  277. * If present, complete @comp and then return @code to kthread_stop().
  278. *
  279. * A kernel thread whose module may be removed after the completion of
  280. * @comp can use this function to exit safely.
  281. *
  282. * Does not return.
  283. */
  284. void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
  285. {
  286. if (comp)
  287. complete(comp);
  288. kthread_exit(code);
  289. }
  290. EXPORT_SYMBOL(kthread_complete_and_exit);
  291. static int kthread(void *_create)
  292. {
  293. static const struct sched_param param = { .sched_priority = 0 };
  294. /* Copy data: it's on kthread's stack */
  295. struct kthread_create_info *create = _create;
  296. int (*threadfn)(void *data) = create->threadfn;
  297. void *data = create->data;
  298. struct completion *done;
  299. struct kthread *self;
  300. int ret;
  301. self = to_kthread(current);
  302. /* Release the structure when caller killed by a fatal signal. */
  303. done = xchg(&create->done, NULL);
  304. if (!done) {
  305. kfree(create);
  306. kthread_exit(-EINTR);
  307. }
  308. self->threadfn = threadfn;
  309. self->data = data;
  310. /*
  311. * The new thread inherited kthreadd's priority and CPU mask. Reset
  312. * back to default in case they have been changed.
  313. */
  314. sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
  315. set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
  316. /* OK, tell user we're spawned, wait for stop or wakeup */
  317. __set_current_state(TASK_UNINTERRUPTIBLE);
  318. create->result = current;
  319. /*
  320. * Thread is going to call schedule(), do not preempt it,
  321. * or the creator may spend more time in wait_task_inactive().
  322. */
  323. preempt_disable();
  324. complete(done);
  325. schedule_preempt_disabled();
  326. preempt_enable();
  327. ret = -EINTR;
  328. if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
  329. cgroup_kthread_ready();
  330. __kthread_parkme(self);
  331. ret = threadfn(data);
  332. }
  333. kthread_exit(ret);
  334. }
  335. /* Called from kernel_clone() to get node information for the task about to be created. */
  336. int tsk_fork_get_node(struct task_struct *tsk)
  337. {
  338. #ifdef CONFIG_NUMA
  339. if (tsk == kthreadd_task)
  340. return tsk->pref_node_fork;
  341. #endif
  342. return NUMA_NO_NODE;
  343. }
  344. static void create_kthread(struct kthread_create_info *create)
  345. {
  346. int pid;
  347. #ifdef CONFIG_NUMA
  348. current->pref_node_fork = create->node;
  349. #endif
  350. /* We want our own signal handler (we take no signals by default). */
  351. pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
  352. if (pid < 0) {
  353. /* Release the structure when caller killed by a fatal signal. */
  354. struct completion *done = xchg(&create->done, NULL);
  355. if (!done) {
  356. kfree(create);
  357. return;
  358. }
  359. create->result = ERR_PTR(pid);
  360. complete(done);
  361. }
  362. }
  363. static __printf(4, 0)
  364. struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
  365. void *data, int node,
  366. const char namefmt[],
  367. va_list args)
  368. {
  369. DECLARE_COMPLETION_ONSTACK(done);
  370. struct task_struct *task;
  371. struct kthread_create_info *create = kmalloc(sizeof(*create),
  372. GFP_KERNEL);
  373. if (!create)
  374. return ERR_PTR(-ENOMEM);
  375. create->threadfn = threadfn;
  376. create->data = data;
  377. create->node = node;
  378. create->done = &done;
  379. spin_lock(&kthread_create_lock);
  380. list_add_tail(&create->list, &kthread_create_list);
  381. spin_unlock(&kthread_create_lock);
  382. wake_up_process(kthreadd_task);
  383. /*
  384. * Wait for completion in killable state, for I might be chosen by
  385. * the OOM killer while kthreadd is trying to allocate memory for
  386. * new kernel thread.
  387. */
  388. if (unlikely(wait_for_completion_killable(&done))) {
  389. /*
  390. * If I was killed by a fatal signal before kthreadd (or new
  391. * kernel thread) calls complete(), leave the cleanup of this
  392. * structure to that thread.
  393. */
  394. if (xchg(&create->done, NULL))
  395. return ERR_PTR(-EINTR);
  396. /*
  397. * kthreadd (or new kernel thread) will call complete()
  398. * shortly.
  399. */
  400. wait_for_completion(&done);
  401. }
  402. task = create->result;
  403. if (!IS_ERR(task)) {
  404. char name[TASK_COMM_LEN];
  405. va_list aq;
  406. int len;
  407. /*
  408. * task is already visible to other tasks, so updating
  409. * COMM must be protected.
  410. */
  411. va_copy(aq, args);
  412. len = vsnprintf(name, sizeof(name), namefmt, aq);
  413. va_end(aq);
  414. if (len >= TASK_COMM_LEN) {
  415. struct kthread *kthread = to_kthread(task);
  416. /* leave it truncated when out of memory. */
  417. kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
  418. }
  419. set_task_comm(task, name);
  420. }
  421. kfree(create);
  422. return task;
  423. }
  424. /**
  425. * kthread_create_on_node - create a kthread.
  426. * @threadfn: the function to run until signal_pending(current).
  427. * @data: data ptr for @threadfn.
  428. * @node: task and thread structures for the thread are allocated on this node
  429. * @namefmt: printf-style name for the thread.
  430. *
  431. * Description: This helper function creates and names a kernel
  432. * thread. The thread will be stopped: use wake_up_process() to start
  433. * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
  434. * is affine to all CPUs.
  435. *
  436. * If the thread is going to be bound to a particular cpu, give its node
  437. * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
  438. * When woken, the thread will run @threadfn() with @data as its
  439. * argument. @threadfn() can either return directly if it is a
  440. * standalone thread for which no one will call kthread_stop(), or
  441. * return when 'kthread_should_stop()' is true (which means
  442. * kthread_stop() has been called). The return value should be zero
  443. * or a negative error number; it will be passed to kthread_stop().
  444. *
  445. * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
  446. */
  447. struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
  448. void *data, int node,
  449. const char namefmt[],
  450. ...)
  451. {
  452. struct task_struct *task;
  453. va_list args;
  454. va_start(args, namefmt);
  455. task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
  456. va_end(args);
  457. return task;
  458. }
  459. EXPORT_SYMBOL(kthread_create_on_node);
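/*
 * Editor's illustrative sketch of the creator side: the new task comes back
 * sleeping in TASK_UNINTERRUPTIBLE and only runs threadfn() after
 * wake_up_process(); kthread_run() wraps these two steps. It reuses the
 * hypothetical example_stop_aware_fn() sketched earlier in this file.
 */
static struct task_struct *example_start_thread(void *cookie)
{
        struct task_struct *tsk;

        tsk = kthread_create_on_node(example_stop_aware_fn, cookie,
                                     NUMA_NO_NODE, "example/%d", 0);
        if (IS_ERR(tsk))
                return tsk;             /* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR) */

        wake_up_process(tsk);           /* start executing threadfn() */
        return tsk;                     /* later torn down with kthread_stop() */
}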
  460. static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
  461. {
  462. unsigned long flags;
  463. if (!wait_task_inactive(p, state)) {
  464. WARN_ON(1);
  465. return;
  466. }
  467. /* It's safe because the task is inactive. */
  468. raw_spin_lock_irqsave(&p->pi_lock, flags);
  469. do_set_cpus_allowed(p, mask);
  470. p->flags |= PF_NO_SETAFFINITY;
  471. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  472. }
  473. static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
  474. {
  475. __kthread_bind_mask(p, cpumask_of(cpu), state);
  476. }
  477. void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
  478. {
  479. __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
  480. }
  481. EXPORT_SYMBOL_GPL(kthread_bind_mask);
  482. /**
  483. * kthread_bind - bind a just-created kthread to a cpu.
  484. * @p: thread created by kthread_create().
  485. * @cpu: cpu (might not be online, must be possible) for @k to run on.
  486. *
  487. * Description: This function is equivalent to set_cpus_allowed(),
  488. * except that @cpu doesn't need to be online, and the thread must be
  489. * stopped (i.e., just returned from kthread_create()).
  490. */
  491. void kthread_bind(struct task_struct *p, unsigned int cpu)
  492. {
  493. __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
  494. }
  495. EXPORT_SYMBOL(kthread_bind);
  496. /**
  497. * kthread_create_on_cpu - Create a cpu bound kthread
  498. * @threadfn: the function to run until signal_pending(current).
  499. * @data: data ptr for @threadfn.
  500. * @cpu: The cpu to which the thread should be bound.
  501. * @namefmt: printf-style name for the thread. Format is restricted
  502. * to "name.*%u". Code fills in cpu number.
  503. *
  504. * Description: This helper function creates, names, and binds a kernel thread to @cpu. The thread is created stopped; use wake_up_process() to start it.
  505. */
  506. struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
  507. void *data, unsigned int cpu,
  508. const char *namefmt)
  509. {
  510. struct task_struct *p;
  511. p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
  512. cpu);
  513. if (IS_ERR(p))
  514. return p;
  515. kthread_bind(p, cpu);
  516. /* CPU hotplug needs to bind it again when unparking the thread. */
  517. to_kthread(p)->cpu = cpu;
  518. return p;
  519. }
  520. EXPORT_SYMBOL(kthread_create_on_cpu);
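/*
 * Editor's illustrative sketch: a CPU-bound helper thread. The namefmt must
 * contain exactly one %u, which kthread_create_on_cpu() fills with the CPU
 * number; the task comes back bound to @cpu and still stopped. It reuses
 * the hypothetical example_parkable_fn() sketched earlier.
 */
static struct task_struct *example_start_cpu_thread(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create_on_cpu(example_parkable_fn, NULL, cpu,
                                    "example_cpu/%u");
        if (!IS_ERR(tsk)) {
                kthread_set_per_cpu(tsk, cpu);  /* let hotplug treat it as per-CPU */
                wake_up_process(tsk);
        }
        return tsk;
}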
  521. void kthread_set_per_cpu(struct task_struct *k, int cpu)
  522. {
  523. struct kthread *kthread = to_kthread(k);
  524. if (!kthread)
  525. return;
  526. WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
  527. if (cpu < 0) {
  528. clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
  529. return;
  530. }
  531. kthread->cpu = cpu;
  532. set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
  533. }
  534. EXPORT_SYMBOL_GPL(kthread_set_per_cpu);
  535. bool kthread_is_per_cpu(struct task_struct *p)
  536. {
  537. struct kthread *kthread = __to_kthread(p);
  538. if (!kthread)
  539. return false;
  540. return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
  541. }
  542. /**
  543. * kthread_unpark - unpark a thread created by kthread_create().
  544. * @k: thread created by kthread_create().
  545. *
  546. * Sets kthread_should_park() for @k to return false and wakes it so
  547. * that it leaves TASK_PARKED and resumes its thread function. If the
  548. * thread is marked percpu then it is bound to the cpu again.
  549. */
  550. void kthread_unpark(struct task_struct *k)
  551. {
  552. struct kthread *kthread = to_kthread(k);
  553. /*
  554. * Newly created kthread was parked when the CPU was offline.
  555. * The binding was lost and we need to set it again.
  556. */
  557. if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
  558. __kthread_bind(k, kthread->cpu, TASK_PARKED);
  559. clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  560. /*
  561. * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
  562. */
  563. wake_up_state(k, TASK_PARKED);
  564. }
  565. EXPORT_SYMBOL_GPL(kthread_unpark);
  566. /**
  567. * kthread_park - park a thread created by kthread_create().
  568. * @k: thread created by kthread_create().
  569. *
  570. * Sets kthread_should_park() for @k to return true, wakes it, and
  571. * waits for it to return. This can also be called after kthread_create()
  572. * instead of calling wake_up_process(): the thread will park without
  573. * calling threadfn().
  574. *
  575. * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
  576. * If called by the kthread itself, just the park bit is set.
  577. */
  578. int kthread_park(struct task_struct *k)
  579. {
  580. struct kthread *kthread = to_kthread(k);
  581. if (WARN_ON(k->flags & PF_EXITING))
  582. return -ENOSYS;
  583. if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
  584. return -EBUSY;
  585. set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  586. if (k != current) {
  587. wake_up_process(k);
  588. /*
  589. * Wait for __kthread_parkme() to complete(), this means we
  590. * _will_ have TASK_PARKED and are about to call schedule().
  591. */
  592. wait_for_completion(&kthread->parked);
  593. /*
  594. * Now wait for that schedule() to complete and the task to
  595. * get scheduled out.
  596. */
  597. WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
  598. }
  599. return 0;
  600. }
  601. EXPORT_SYMBOL_GPL(kthread_park);
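/*
 * Editor's illustrative sketch of the controller side: pairing
 * kthread_park() with kthread_unpark() around a window in which the thread
 * must be off-CPU, assuming @tsk runs a parkable function such as the
 * earlier sketch.
 */
static int example_quiesce(struct task_struct *tsk)
{
        int ret;

        ret = kthread_park(tsk);        /* returns once tsk sits in TASK_PARKED */
        if (ret)
                return ret;

        /* ... the thread is guaranteed not to be running here ... */

        kthread_unpark(tsk);            /* lets it run threadfn() again */
        return 0;
}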
  602. /**
  603. * kthread_stop - stop a thread created by kthread_create().
  604. * @k: thread created by kthread_create().
  605. *
  606. * Sets kthread_should_stop() for @k to return true, wakes it, and
  607. * waits for it to exit. This can also be called after kthread_create()
  608. * instead of calling wake_up_process(): the thread will exit without
  609. * calling threadfn().
  610. *
  611. * If threadfn() may call kthread_exit() itself, the caller must ensure
  612. * task_struct can't go away.
  613. *
  614. * Returns the result of threadfn(), or %-EINTR if wake_up_process()
  615. * was never called.
  616. */
  617. int kthread_stop(struct task_struct *k)
  618. {
  619. struct kthread *kthread;
  620. int ret;
  621. trace_sched_kthread_stop(k);
  622. get_task_struct(k);
  623. kthread = to_kthread(k);
  624. set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
  625. kthread_unpark(k);
  626. set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
  627. wake_up_process(k);
  628. wait_for_completion(&kthread->exited);
  629. ret = kthread->result;
  630. put_task_struct(k);
  631. trace_sched_kthread_stop_ret(ret);
  632. return ret;
  633. }
  634. EXPORT_SYMBOL(kthread_stop);
  635. int kthreadd(void *unused)
  636. {
  637. struct task_struct *tsk = current;
  638. /* Setup a clean context for our children to inherit. */
  639. set_task_comm(tsk, "kthreadd");
  640. ignore_signals(tsk);
  641. set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
  642. set_mems_allowed(node_states[N_MEMORY]);
  643. current->flags |= PF_NOFREEZE;
  644. cgroup_init_kthreadd();
  645. for (;;) {
  646. set_current_state(TASK_INTERRUPTIBLE);
  647. if (list_empty(&kthread_create_list))
  648. schedule();
  649. __set_current_state(TASK_RUNNING);
  650. spin_lock(&kthread_create_lock);
  651. while (!list_empty(&kthread_create_list)) {
  652. struct kthread_create_info *create;
  653. create = list_entry(kthread_create_list.next,
  654. struct kthread_create_info, list);
  655. list_del_init(&create->list);
  656. spin_unlock(&kthread_create_lock);
  657. create_kthread(create);
  658. spin_lock(&kthread_create_lock);
  659. }
  660. spin_unlock(&kthread_create_lock);
  661. }
  662. return 0;
  663. }
  664. void __kthread_init_worker(struct kthread_worker *worker,
  665. const char *name,
  666. struct lock_class_key *key)
  667. {
  668. memset(worker, 0, sizeof(struct kthread_worker));
  669. raw_spin_lock_init(&worker->lock);
  670. lockdep_set_class_and_name(&worker->lock, key, name);
  671. INIT_LIST_HEAD(&worker->work_list);
  672. INIT_LIST_HEAD(&worker->delayed_work_list);
  673. }
  674. EXPORT_SYMBOL_GPL(__kthread_init_worker);
  675. /**
  676. * kthread_worker_fn - kthread function to process kthread_worker
  677. * @worker_ptr: pointer to initialized kthread_worker
  678. *
  679. * This function implements the main cycle of kthread worker. It processes
  680. * work_list until it is stopped with kthread_stop(). It sleeps when the queue
  681. * is empty.
  682. *
  683. * The works are not allowed to keep any locks or leave preemption or interrupts
  684. * disabled when they finish. A safe point for freezing is provided when one work
  685. * finishes and before a new one is started.
  686. *
  687. * Also the works must not be handled by more than one worker at the same time,
  688. * see also kthread_queue_work().
  689. */
  690. int kthread_worker_fn(void *worker_ptr)
  691. {
  692. struct kthread_worker *worker = worker_ptr;
  693. struct kthread_work *work;
  694. /*
  695. * FIXME: Update the check and remove the assignment when all kthread
  696. * worker users are created using kthread_create_worker*() functions.
  697. */
  698. WARN_ON(worker->task && worker->task != current);
  699. worker->task = current;
  700. if (worker->flags & KTW_FREEZABLE)
  701. set_freezable();
  702. repeat:
  703. set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
  704. if (kthread_should_stop()) {
  705. __set_current_state(TASK_RUNNING);
  706. raw_spin_lock_irq(&worker->lock);
  707. worker->task = NULL;
  708. raw_spin_unlock_irq(&worker->lock);
  709. return 0;
  710. }
  711. work = NULL;
  712. raw_spin_lock_irq(&worker->lock);
  713. if (!list_empty(&worker->work_list)) {
  714. work = list_first_entry(&worker->work_list,
  715. struct kthread_work, node);
  716. list_del_init(&work->node);
  717. }
  718. worker->current_work = work;
  719. raw_spin_unlock_irq(&worker->lock);
  720. if (work) {
  721. kthread_work_func_t func = work->func;
  722. __set_current_state(TASK_RUNNING);
  723. trace_sched_kthread_work_execute_start(work);
  724. work->func(work);
  725. /*
  726. * Avoid dereferencing work after this point. The trace
  727. * event only cares about the address.
  728. */
  729. trace_sched_kthread_work_execute_end(work, func);
  730. } else if (!freezing(current))
  731. schedule();
  732. try_to_freeze();
  733. cond_resched();
  734. goto repeat;
  735. }
  736. EXPORT_SYMBOL_GPL(kthread_worker_fn);
  737. static __printf(3, 0) struct kthread_worker *
  738. __kthread_create_worker(int cpu, unsigned int flags,
  739. const char namefmt[], va_list args)
  740. {
  741. struct kthread_worker *worker;
  742. struct task_struct *task;
  743. int node = NUMA_NO_NODE;
  744. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  745. if (!worker)
  746. return ERR_PTR(-ENOMEM);
  747. kthread_init_worker(worker);
  748. if (cpu >= 0)
  749. node = cpu_to_node(cpu);
  750. task = __kthread_create_on_node(kthread_worker_fn, worker,
  751. node, namefmt, args);
  752. if (IS_ERR(task))
  753. goto fail_task;
  754. if (cpu >= 0)
  755. kthread_bind(task, cpu);
  756. worker->flags = flags;
  757. worker->task = task;
  758. wake_up_process(task);
  759. return worker;
  760. fail_task:
  761. kfree(worker);
  762. return ERR_CAST(task);
  763. }
  764. /**
  765. * kthread_create_worker - create a kthread worker
  766. * @flags: flags modifying the default behavior of the worker
  767. * @namefmt: printf-style name for the kthread worker (task).
  768. *
  769. * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
  770. * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
  771. * when the caller was killed by a fatal signal.
  772. */
  773. struct kthread_worker *
  774. kthread_create_worker(unsigned int flags, const char namefmt[], ...)
  775. {
  776. struct kthread_worker *worker;
  777. va_list args;
  778. va_start(args, namefmt);
  779. worker = __kthread_create_worker(-1, flags, namefmt, args);
  780. va_end(args);
  781. return worker;
  782. }
  783. EXPORT_SYMBOL(kthread_create_worker);
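/*
 * Editor's illustrative sketch: creating a dedicated worker and feeding it
 * one work item. Only the kthread_worker API calls are real; the callback,
 * the worker name and the static work item are hypothetical.
 */
static void example_work_fn(struct kthread_work *work)
{
        /* runs in the worker thread's context */
}

static struct kthread_worker *example_setup_worker(void)
{
        static struct kthread_work example_work;
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "example_worker");
        if (IS_ERR(worker))
                return worker;

        kthread_init_work(&example_work, example_work_fn);
        kthread_queue_work(worker, &example_work);
        return worker;
}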
  784. /**
  785. * kthread_create_worker_on_cpu - create a kthread worker and bind it
  786. * to a given CPU and the associated NUMA node.
  787. * @cpu: CPU number
  788. * @flags: flags modifying the default behavior of the worker
  789. * @namefmt: printf-style name for the kthread worker (task).
  790. *
  791. * Use a valid CPU number if you want to bind the kthread worker
  792. * to the given CPU and the associated NUMA node.
  793. *
  794. * A good practice is to also add the cpu number to the worker name.
  795. * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
  796. *
  797. * CPU hotplug:
  798. * The kthread worker API is simple and generic. It just provides a way
  799. * to create, use, and destroy workers.
  800. *
  801. * It is up to the API user how to handle CPU hotplug. They have to decide
  802. * how to handle pending work items, prevent queuing new ones, and
  803. * restore the functionality when the CPU goes off and on. There are a
  804. * few catches:
  805. *
  806. * - CPU affinity gets lost when it is scheduled on an offline CPU.
  807. *
  808. * - The worker might not exist if the CPU was offline when the user
  809. * created the workers.
  810. *
  811. * Good practice is to implement two CPU hotplug callbacks and to
  812. * destroy/create the worker when the CPU goes down/up.
  813. *
  814. * Return:
  815. * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
  816. * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
  817. * when the caller was killed by a fatal signal.
  818. */
  819. struct kthread_worker *
  820. kthread_create_worker_on_cpu(int cpu, unsigned int flags,
  821. const char namefmt[], ...)
  822. {
  823. struct kthread_worker *worker;
  824. va_list args;
  825. va_start(args, namefmt);
  826. worker = __kthread_create_worker(cpu, flags, namefmt, args);
  827. va_end(args);
  828. return worker;
  829. }
  830. EXPORT_SYMBOL(kthread_create_worker_on_cpu);
  831. /*
  832. * Returns true when the work could not be queued at the moment.
  833. * It happens when it is already pending in a worker list
  834. * or when it is being cancelled.
  835. */
  836. static inline bool queuing_blocked(struct kthread_worker *worker,
  837. struct kthread_work *work)
  838. {
  839. lockdep_assert_held(&worker->lock);
  840. return !list_empty(&work->node) || work->canceling;
  841. }
  842. static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
  843. struct kthread_work *work)
  844. {
  845. lockdep_assert_held(&worker->lock);
  846. WARN_ON_ONCE(!list_empty(&work->node));
  847. /* Do not use a work with >1 worker, see kthread_queue_work() */
  848. WARN_ON_ONCE(work->worker && work->worker != worker);
  849. }
  850. /* insert @work before @pos in @worker */
  851. static void kthread_insert_work(struct kthread_worker *worker,
  852. struct kthread_work *work,
  853. struct list_head *pos)
  854. {
  855. kthread_insert_work_sanity_check(worker, work);
  856. trace_sched_kthread_work_queue_work(worker, work);
  857. list_add_tail(&work->node, pos);
  858. work->worker = worker;
  859. if (!worker->current_work && likely(worker->task))
  860. wake_up_process(worker->task);
  861. }
  862. /**
  863. * kthread_queue_work - queue a kthread_work
  864. * @worker: target kthread_worker
  865. * @work: kthread_work to queue
  866. *
  867. * Queue @work to the work processor @worker for async execution. @worker
  868. * must have been created with kthread_create_worker(). Returns %true
  869. * if @work was successfully queued, %false if it was already pending.
  870. *
  871. * Reinitialize the work if it needs to be used by another worker.
  872. * For example, when the worker was stopped and started again.
  873. */
  874. bool kthread_queue_work(struct kthread_worker *worker,
  875. struct kthread_work *work)
  876. {
  877. bool ret = false;
  878. unsigned long flags;
  879. raw_spin_lock_irqsave(&worker->lock, flags);
  880. if (!queuing_blocked(worker, work)) {
  881. kthread_insert_work(worker, work, &worker->work_list);
  882. ret = true;
  883. }
  884. raw_spin_unlock_irqrestore(&worker->lock, flags);
  885. return ret;
  886. }
  887. EXPORT_SYMBOL_GPL(kthread_queue_work);
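/*
 * Editor's illustrative sketch: the usual pattern of embedding a
 * kthread_work in a driver object and recovering the object with
 * container_of() in the callback. All names except the kthread_work API
 * are hypothetical; dev->irq_work is assumed to have been initialised with
 * kthread_init_work() and dev->pending_events to be protected by the
 * caller's own locking.
 */
struct example_device {
        int pending_events;
        struct kthread_work irq_work;
};

static void example_irq_work_fn(struct kthread_work *work)
{
        struct example_device *dev =
                container_of(work, struct example_device, irq_work);

        dev->pending_events = 0;        /* drain whatever was queued */
}

static void example_kick(struct kthread_worker *worker,
                         struct example_device *dev)
{
        dev->pending_events++;
        /* returns false (and does nothing) if the work is already pending */
        kthread_queue_work(worker, &dev->irq_work);
}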
  888. /**
  889. * kthread_delayed_work_timer_fn - callback that queues the associated kthread
  890. * delayed work when the timer expires.
  891. * @t: pointer to the expired timer
  892. *
  893. * The format of the function is defined by struct timer_list.
  894. * It should be called from an irq-safe timer with irqs already disabled.
  895. */
  896. void kthread_delayed_work_timer_fn(struct timer_list *t)
  897. {
  898. struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
  899. struct kthread_work *work = &dwork->work;
  900. struct kthread_worker *worker = work->worker;
  901. unsigned long flags;
  902. /*
  903. * This might happen when a pending work is reinitialized.
  904. * It means that it is being used in a wrong way.
  905. */
  906. if (WARN_ON_ONCE(!worker))
  907. return;
  908. raw_spin_lock_irqsave(&worker->lock, flags);
  909. /* Work must not be used with >1 worker, see kthread_queue_work(). */
  910. WARN_ON_ONCE(work->worker != worker);
  911. /* Move the work from worker->delayed_work_list. */
  912. WARN_ON_ONCE(list_empty(&work->node));
  913. list_del_init(&work->node);
  914. if (!work->canceling)
  915. kthread_insert_work(worker, work, &worker->work_list);
  916. raw_spin_unlock_irqrestore(&worker->lock, flags);
  917. }
  918. EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
  919. static void __kthread_queue_delayed_work(struct kthread_worker *worker,
  920. struct kthread_delayed_work *dwork,
  921. unsigned long delay)
  922. {
  923. struct timer_list *timer = &dwork->timer;
  924. struct kthread_work *work = &dwork->work;
  925. WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
  926. /*
  927. * If @delay is 0, queue @dwork->work immediately. This is for
  928. * both optimization and correctness. The earliest @timer can
  929. * expire is on the closest next tick and delayed_work users depend
  930. * on that there's no such delay when @delay is 0.
  931. */
  932. if (!delay) {
  933. kthread_insert_work(worker, work, &worker->work_list);
  934. return;
  935. }
  936. /* Be paranoid and try to detect possible races already now. */
  937. kthread_insert_work_sanity_check(worker, work);
  938. list_add(&work->node, &worker->delayed_work_list);
  939. work->worker = worker;
  940. timer->expires = jiffies + delay;
  941. add_timer(timer);
  942. }
  943. /**
  944. * kthread_queue_delayed_work - queue the associated kthread work
  945. * after a delay.
  946. * @worker: target kthread_worker
  947. * @dwork: kthread_delayed_work to queue
  948. * @delay: number of jiffies to wait before queuing
  949. *
  950. * If the work has not been pending it starts a timer that will queue
  951. * the work after the given @delay. If @delay is zero, it queues the
  952. * work immediately.
  953. *
  954. * Return: %false if the @work has already been pending. It means that
  955. * either the timer was running or the work was queued. It returns %true
  956. * otherwise.
  957. */
  958. bool kthread_queue_delayed_work(struct kthread_worker *worker,
  959. struct kthread_delayed_work *dwork,
  960. unsigned long delay)
  961. {
  962. struct kthread_work *work = &dwork->work;
  963. unsigned long flags;
  964. bool ret = false;
  965. raw_spin_lock_irqsave(&worker->lock, flags);
  966. if (!queuing_blocked(worker, work)) {
  967. __kthread_queue_delayed_work(worker, dwork, delay);
  968. ret = true;
  969. }
  970. raw_spin_unlock_irqrestore(&worker->lock, flags);
  971. return ret;
  972. }
  973. EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
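/*
 * Editor's illustrative sketch: arming a delayed work item. The timer
 * embedded in kthread_delayed_work fires after (at least) @delay jiffies
 * and then moves the work onto the worker's queue. The work item and the
 * one-second period are hypothetical.
 */
static struct kthread_delayed_work example_poll_dwork;

static void example_arm_poll(struct kthread_worker *worker,
                             kthread_work_func_t fn)
{
        kthread_init_delayed_work(&example_poll_dwork, fn);
        /* run fn() in the worker roughly one second from now */
        kthread_queue_delayed_work(worker, &example_poll_dwork, HZ);
}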
  974. struct kthread_flush_work {
  975. struct kthread_work work;
  976. struct completion done;
  977. };
  978. static void kthread_flush_work_fn(struct kthread_work *work)
  979. {
  980. struct kthread_flush_work *fwork =
  981. container_of(work, struct kthread_flush_work, work);
  982. complete(&fwork->done);
  983. }
  984. /**
  985. * kthread_flush_work - flush a kthread_work
  986. * @work: work to flush
  987. *
  988. * If @work is queued or executing, wait for it to finish execution.
  989. */
  990. void kthread_flush_work(struct kthread_work *work)
  991. {
  992. struct kthread_flush_work fwork = {
  993. KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  994. COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  995. };
  996. struct kthread_worker *worker;
  997. bool noop = false;
  998. worker = work->worker;
  999. if (!worker)
  1000. return;
  1001. raw_spin_lock_irq(&worker->lock);
  1002. /* Work must not be used with >1 worker, see kthread_queue_work(). */
  1003. WARN_ON_ONCE(work->worker != worker);
  1004. if (!list_empty(&work->node))
  1005. kthread_insert_work(worker, &fwork.work, work->node.next);
  1006. else if (worker->current_work == work)
  1007. kthread_insert_work(worker, &fwork.work,
  1008. worker->work_list.next);
  1009. else
  1010. noop = true;
  1011. raw_spin_unlock_irq(&worker->lock);
  1012. if (!noop)
  1013. wait_for_completion(&fwork.done);
  1014. }
  1015. EXPORT_SYMBOL_GPL(kthread_flush_work);
  1016. /*
  1017. * Make sure that the timer is neither set nor running and could
  1018. * not manipulate the work list_head any longer.
  1019. *
  1020. * The function is called under worker->lock. The lock is temporarily
  1021. * released but the timer can't be set again in the meantime.
  1022. */
  1023. static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
  1024. unsigned long *flags)
  1025. {
  1026. struct kthread_delayed_work *dwork =
  1027. container_of(work, struct kthread_delayed_work, work);
  1028. struct kthread_worker *worker = work->worker;
  1029. /*
  1030. * del_timer_sync() must be called to make sure that the timer
  1031. * callback is not running. The lock must be temporarily released
  1032. * to avoid a deadlock with the callback. In the meantime,
  1033. * any queuing is blocked by setting the canceling counter.
  1034. */
  1035. work->canceling++;
  1036. raw_spin_unlock_irqrestore(&worker->lock, *flags);
  1037. del_timer_sync(&dwork->timer);
  1038. raw_spin_lock_irqsave(&worker->lock, *flags);
  1039. work->canceling--;
  1040. }
  1041. /*
  1042. * This function removes the work from the worker queue.
  1043. *
  1044. * It is called under worker->lock. The caller must make sure that
  1045. * the timer used by delayed work is not running, e.g. by calling
  1046. * kthread_cancel_delayed_work_timer().
  1047. *
  1048. * The work might still be in use when this function finishes. See the
  1049. * current_work processed by the worker.
  1050. *
  1051. * Return: %true if @work was pending and successfully canceled,
  1052. * %false if @work was not pending
  1053. */
  1054. static bool __kthread_cancel_work(struct kthread_work *work)
  1055. {
  1056. /*
  1057. * Try to remove the work from a worker list. It might either
  1058. * be from worker->work_list or from worker->delayed_work_list.
  1059. */
  1060. if (!list_empty(&work->node)) {
  1061. list_del_init(&work->node);
  1062. return true;
  1063. }
  1064. return false;
  1065. }
  1066. /**
  1067. * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
  1068. * @worker: kthread worker to use
  1069. * @dwork: kthread delayed work to queue
  1070. * @delay: number of jiffies to wait before queuing
  1071. *
  1072. * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
  1073. * modify @dwork's timer so that it expires after @delay. If @delay is zero,
  1074. * @work is guaranteed to be queued immediately.
  1075. *
  1076. * Return: %false if @dwork was idle and queued, %true otherwise.
  1077. *
  1078. * A special case is when the work is being canceled in parallel.
  1079. * It might be caused either by the real kthread_cancel_delayed_work_sync()
  1080. * or yet another kthread_mod_delayed_work() call. We let the other command
  1081. * win and return %true here. The return value can be used for reference
  1082. * counting and the number of queued works stays the same. Anyway, the caller
  1083. * is supposed to synchronize these operations in a reasonable way.
  1084. *
  1085. * This function is safe to call from any context including IRQ handler.
  1086. * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
  1087. * for details.
  1088. */
  1089. bool kthread_mod_delayed_work(struct kthread_worker *worker,
  1090. struct kthread_delayed_work *dwork,
  1091. unsigned long delay)
  1092. {
  1093. struct kthread_work *work = &dwork->work;
  1094. unsigned long flags;
  1095. int ret;
  1096. raw_spin_lock_irqsave(&worker->lock, flags);
  1097. /* Do not bother with canceling when never queued. */
  1098. if (!work->worker) {
  1099. ret = false;
  1100. goto fast_queue;
  1101. }
  1102. /* Work must not be used with >1 worker, see kthread_queue_work() */
  1103. WARN_ON_ONCE(work->worker != worker);
  1104. /*
  1105. * Temporarily cancel the work but do not fight with another command
  1106. * that is canceling the work as well.
  1107. *
  1108. * It is a bit tricky because of possible races with another
  1109. * mod_delayed_work() and cancel_delayed_work() callers.
  1110. *
  1111. * The timer must be canceled first because worker->lock is released
  1112. * when doing so. But the work can be removed from the queue (list)
  1113. * only when it can be queued again so that the return value can
  1114. * be used for reference counting.
  1115. */
  1116. kthread_cancel_delayed_work_timer(work, &flags);
  1117. if (work->canceling) {
  1118. /* The number of works in the queue does not change. */
  1119. ret = true;
  1120. goto out;
  1121. }
  1122. ret = __kthread_cancel_work(work);
  1123. fast_queue:
  1124. __kthread_queue_delayed_work(worker, dwork, delay);
  1125. out:
  1126. raw_spin_unlock_irqrestore(&worker->lock, flags);
  1127. return ret;
  1128. }
  1129. EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
  1130. static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
  1131. {
  1132. struct kthread_worker *worker = work->worker;
  1133. unsigned long flags;
  1134. int ret = false;
  1135. if (!worker)
  1136. goto out;
  1137. raw_spin_lock_irqsave(&worker->lock, flags);
  1138. /* Work must not be used with >1 worker, see kthread_queue_work(). */
  1139. WARN_ON_ONCE(work->worker != worker);
  1140. if (is_dwork)
  1141. kthread_cancel_delayed_work_timer(work, &flags);
  1142. ret = __kthread_cancel_work(work);
  1143. if (worker->current_work != work)
  1144. goto out_fast;
  1145. /*
  1146. * The work is in progress and we need to wait with the lock released.
  1147. * In the meantime, block any queuing by setting the canceling counter.
  1148. */
  1149. work->canceling++;
  1150. raw_spin_unlock_irqrestore(&worker->lock, flags);
  1151. kthread_flush_work(work);
  1152. raw_spin_lock_irqsave(&worker->lock, flags);
  1153. work->canceling--;
  1154. out_fast:
  1155. raw_spin_unlock_irqrestore(&worker->lock, flags);
  1156. out:
  1157. return ret;
  1158. }
  1159. /**
  1160. * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
  1161. * @work: the kthread work to cancel
  1162. *
  1163. * Cancel @work and wait for its execution to finish. This function
  1164. * can be used even if the work re-queues itself. On return from this
  1165. * function, @work is guaranteed to be not pending or executing on any CPU.
  1166. *
  1167. * kthread_cancel_work_sync(&delayed_work->work) must not be used for
  1168. * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
  1169. *
  1170. * The caller must ensure that the worker on which @work was last
  1171. * queued can't be destroyed before this function returns.
  1172. *
  1173. * Return: %true if @work was pending, %false otherwise.
  1174. */
  1175. bool kthread_cancel_work_sync(struct kthread_work *work)
  1176. {
  1177. return __kthread_cancel_work_sync(work, false);
  1178. }
  1179. EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
  1180. /**
  1181. * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
  1182. * wait for it to finish.
  1183. * @dwork: the kthread delayed work to cancel
  1184. *
  1185. * This is kthread_cancel_work_sync() for delayed works.
  1186. *
  1187. * Return: %true if @dwork was pending, %false otherwise.
  1188. */
  1189. bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
  1190. {
  1191. return __kthread_cancel_work_sync(&dwork->work, true);
  1192. }
  1193. EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
  1194. /**
  1195. * kthread_flush_worker - flush all current works on a kthread_worker
  1196. * @worker: worker to flush
  1197. *
  1198. * Wait until all currently executing or pending works on @worker are
  1199. * finished.
  1200. */
  1201. void kthread_flush_worker(struct kthread_worker *worker)
  1202. {
  1203. struct kthread_flush_work fwork = {
  1204. KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  1205. COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  1206. };
  1207. kthread_queue_work(worker, &fwork.work);
  1208. wait_for_completion(&fwork.done);
  1209. }
  1210. EXPORT_SYMBOL_GPL(kthread_flush_worker);
  1211. /**
  1212. * kthread_destroy_worker - destroy a kthread worker
  1213. * @worker: worker to be destroyed
  1214. *
  1215. * Flush and destroy @worker. The simple flush is enough because the kthread
  1216. * worker API is used only in trivial scenarios. There are no multi-step state
  1217. * machines needed.
  1218. */
  1219. void kthread_destroy_worker(struct kthread_worker *worker)
  1220. {
  1221. struct task_struct *task;
  1222. task = worker->task;
  1223. if (WARN_ON(!task))
  1224. return;
  1225. kthread_flush_worker(worker);
  1226. kthread_stop(task);
  1227. WARN_ON(!list_empty(&worker->work_list));
  1228. kfree(worker);
  1229. }
  1230. EXPORT_SYMBOL(kthread_destroy_worker);
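/*
 * Editor's illustrative sketch: orderly teardown. Delayed works are
 * cancelled first so their timers cannot re-queue anything, then the worker
 * is destroyed (kthread_destroy_worker() flushes the remaining works and
 * stops the task). The arguments are hypothetical.
 */
static void example_teardown(struct kthread_worker *worker,
                             struct kthread_delayed_work *dwork)
{
        kthread_cancel_delayed_work_sync(dwork);
        kthread_destroy_worker(worker);
}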
  1231. /**
  1232. * kthread_use_mm - make the calling kthread operate on an address space
  1233. * @mm: address space to operate on
  1234. */
  1235. void kthread_use_mm(struct mm_struct *mm)
  1236. {
  1237. struct mm_struct *active_mm;
  1238. struct task_struct *tsk = current;
  1239. WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
  1240. WARN_ON_ONCE(tsk->mm);
  1241. task_lock(tsk);
  1242. /* Hold off tlb flush IPIs while switching mm's */
  1243. local_irq_disable();
  1244. active_mm = tsk->active_mm;
  1245. if (active_mm != mm) {
  1246. mmgrab(mm);
  1247. tsk->active_mm = mm;
  1248. }
  1249. tsk->mm = mm;
  1250. membarrier_update_current_mm(mm);
  1251. switch_mm_irqs_off(active_mm, mm, tsk);
  1252. local_irq_enable();
  1253. task_unlock(tsk);
  1254. #ifdef finish_arch_post_lock_switch
  1255. finish_arch_post_lock_switch();
  1256. #endif
  1257. /*
  1258. * When a kthread starts operating on an address space, the loop
  1259. * in membarrier_{private,global}_expedited() may not observe
  1260. * that tsk->mm, and not issue an IPI. Membarrier requires a
  1261. * memory barrier after storing to tsk->mm, before accessing
  1262. * user-space memory. A full memory barrier for membarrier
  1263. * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
  1264. * mmdrop(), or explicitly with smp_mb().
  1265. */
  1266. if (active_mm != mm)
  1267. mmdrop(active_mm);
  1268. else
  1269. smp_mb();
  1270. }
  1271. EXPORT_SYMBOL_GPL(kthread_use_mm);
  1272. /**
  1273. * kthread_unuse_mm - reverse the effect of kthread_use_mm()
  1274. * @mm: address space to operate on
  1275. */
  1276. void kthread_unuse_mm(struct mm_struct *mm)
  1277. {
  1278. struct task_struct *tsk = current;
  1279. WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
  1280. WARN_ON_ONCE(!tsk->mm);
  1281. task_lock(tsk);
  1282. /*
  1283. * When a kthread stops operating on an address space, the loop
  1284. * in membarrier_{private,global}_expedited() may not observe
  1285. * that tsk->mm, and not issue an IPI. Membarrier requires a
  1286. * memory barrier after accessing user-space memory, before
  1287. * clearing tsk->mm.
  1288. */
  1289. smp_mb__after_spinlock();
  1290. sync_mm_rss(mm);
  1291. local_irq_disable();
  1292. tsk->mm = NULL;
  1293. membarrier_update_current_mm(NULL);
  1294. /* active_mm is still 'mm' */
  1295. enter_lazy_tlb(mm, tsk);
  1296. local_irq_enable();
  1297. task_unlock(tsk);
  1298. }
  1299. EXPORT_SYMBOL_GPL(kthread_unuse_mm);
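/*
 * Editor's illustrative sketch: a kthread temporarily borrowing a user
 * address space, in the style of vhost/io_uring offload threads. The mm is
 * assumed to be pinned by the caller (e.g. via mmget()) for the whole
 * window; user-space accesses are only legal between the two calls.
 */
static int example_copy_with_mm(struct mm_struct *mm, void *dst,
                                const void __user *src, size_t len)
{
        int ret = 0;

        kthread_use_mm(mm);
        if (copy_from_user(dst, src, len))
                ret = -EFAULT;
        kthread_unuse_mm(mm);
        return ret;
}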
  1300. #ifdef CONFIG_BLK_CGROUP
  1301. /**
  1302. * kthread_associate_blkcg - associate blkcg to current kthread
  1303. * @css: the cgroup info
  1304. *
  1305. * Current thread must be a kthread. The thread is running jobs on behalf of
  1306. * other threads. In some cases, we expect the jobs to attach the cgroup info of
  1307. * the original threads instead of that of the current thread. This function stores
  1308. * original thread's cgroup info in current kthread context for later
  1309. * retrieval.
  1310. */
  1311. void kthread_associate_blkcg(struct cgroup_subsys_state *css)
  1312. {
  1313. struct kthread *kthread;
  1314. if (!(current->flags & PF_KTHREAD))
  1315. return;
  1316. kthread = to_kthread(current);
  1317. if (!kthread)
  1318. return;
  1319. if (kthread->blkcg_css) {
  1320. css_put(kthread->blkcg_css);
  1321. kthread->blkcg_css = NULL;
  1322. }
  1323. if (css) {
  1324. css_get(css);
  1325. kthread->blkcg_css = css;
  1326. }
  1327. }
  1328. EXPORT_SYMBOL(kthread_associate_blkcg);
  1329. /**
  1330. * kthread_blkcg - get associated blkcg css of current kthread
  1331. *
  1332. * Current thread must be a kthread.
  1333. */
  1334. struct cgroup_subsys_state *kthread_blkcg(void)
  1335. {
  1336. struct kthread *kthread;
  1337. if (current->flags & PF_KTHREAD) {
  1338. kthread = to_kthread(current);
  1339. if (kthread)
  1340. return kthread->blkcg_css;
  1341. }
  1342. return NULL;
  1343. }
  1344. #endif