kthread.c

/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
        KTHREAD_IS_PARKED,
};

static inline void set_kthread_struct(void *kthread)
{
        /*
         * We abuse ->set_child_tid to avoid the new member and because it
         * can't be wrongly copied by copy_process(). We also rely on the
         * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
         */
        current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
        WARN_ON(!(k->flags & PF_KTHREAD));
        return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
        /*
         * Can be NULL if this kthread was created by kernel_thread()
         * or if kmalloc() in kthread() failed.
         */
        kfree(to_kthread(k));
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
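
/*
 * A minimal usage sketch (hypothetical caller code, not part of this file):
 * the thread function loops until kthread_should_stop() returns true, and
 * the value it returns is handed back to kthread_stop().
 *
 *      static int my_thread_fn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      // do one unit of work with @data
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 *
 *      // creator side:
 *      //   struct task_struct *tsk = kthread_run(my_thread_fn, NULL, "my_thread");
 *      //   ...
 *      //   int ret = kthread_stop(tsk);   // returns my_thread_fn()'s 0
 */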

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
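
/*
 * A minimal sketch of a freezable kthread loop (hypothetical names, not part
 * of this file): the thread marks itself freezable and then uses
 * kthread_freezable_should_stop() as its single stop/freeze check point.
 *
 *      static int my_freezable_thread_fn(void *data)
 *      {
 *              bool was_frozen;
 *
 *              set_freezable();
 *              while (!kthread_freezable_should_stop(&was_frozen)) {
 *                      // if (was_frozen) redo work invalidated by the freeze
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */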

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;

        probe_kernel_read(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
                __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
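
/*
 * A minimal sketch of a parkable thread function (hypothetical caller code):
 * when kthread_park() sets the park bit, the thread stops doing work and
 * sits in kthread_parkme() until kthread_unpark() or kthread_stop().
 *
 *      static int my_percpu_thread_fn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      if (kthread_should_park()) {
 *                              kthread_parkme();
 *                              continue;
 *                      }
 *                      // do per-cpu work here
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */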

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread *self;
        int ret;

        self = kmalloc(sizeof(*self), GFP_KERNEL);
        set_kthread_struct(self);

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }

        if (!self) {
                create->result = ERR_PTR(-ENOMEM);
                complete(done);
                do_exit(-ENOMEM);
        }

        self->flags = 0;
        self->data = data;
        init_completion(&self->exited);
        init_completion(&self->parked);
        current->vfork_done = &self->exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(done);
        schedule();

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
                __kthread_parkme(self);
                ret = threadfn(data);
        }
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                                                    void *data, int node,
                                                    const char namefmt[],
                                                    va_list args)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
                 * that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };

                vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task, cpu_all_mask);
        }
        kfree(create);
        return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct task_struct *task;
        va_list args;

        va_start(args, namefmt);
        task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
        va_end(args);

        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
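
/*
 * A minimal creation sketch (hypothetical names, not part of this file): the
 * thread is created stopped, optionally configured, and only starts running
 * the thread function after wake_up_process().  kthread_run() from
 * <linux/kthread.h> combines the two steps.
 *
 *      struct task_struct *tsk;
 *
 *      tsk = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *                                   "my_thread/%d", id);
 *      if (IS_ERR(tsk))
 *              return PTR_ERR(tsk);
 *      // e.g. kthread_bind(tsk, cpu) could go here, before the wakeup
 *      wake_up_process(tsk);
 */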

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread.  Format is restricted
 *           to "name.*%u".  Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug needs to bind the thread once again when unparking it. */
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * We clear the IS_PARKED bit here as we don't wait
         * until the task has left the park code.  So if we'd
         * park before that happens we'd see the IS_PARKED bit
         * which might be about to be cleared.
         */
        if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                /*
                 * Newly created kthread was parked when the CPU was offline.
                 * The binding was lost and we need to set it again.
                 */
                if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                        __kthread_bind(k, kthread->cpu, TASK_PARKED);
                wake_up_state(k, TASK_PARKED);
        }
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;

        if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                if (k != current) {
                        wake_up_process(k);
                        wait_for_completion(&kthread->parked);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
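
/*
 * A minimal park/unpark sketch (hypothetical caller, e.g. a CPU hotplug
 * callback): parking quiesces a per-cpu thread without destroying it, and
 * unparking resumes it, rebinding it first if it is marked per-cpu.
 *
 *      // before taking the CPU down:
 *      //      kthread_park(my_percpu_task);
 *      // after the CPU comes back online:
 *      //      kthread_unpark(my_percpu_task);
 */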

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_kthread(k);
        set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
        kthread_unpark(k);
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        memset(worker, 0, sizeof(struct kthread_worker));
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker.  It processes
 * work_list until it is stopped with kthread_stop().  It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish.  There is a defined safe point for freezing after one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        /*
         * FIXME: Update the check and remove the assignment when all kthread
         * worker users are created using kthread_create_worker*() functions.
         */
        WARN_ON(worker->task && worker->task != current);
        worker->task = current;

        if (worker->flags & KTW_FREEZABLE)
                set_freezable();

repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
                        const char namefmt[], va_list args)
{
        struct kthread_worker *worker;
        struct task_struct *task;
        int node = -1;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        kthread_init_worker(worker);

        if (cpu >= 0)
                node = cpu_to_node(cpu);

        task = __kthread_create_on_node(kthread_worker_fn, worker,
                                        node, namefmt, args);
        if (IS_ERR(task))
                goto fail_task;

        if (cpu >= 0)
                kthread_bind(task, cpu);

        worker->flags = flags;
        worker->task = task;
        wake_up_process(task);
        return worker;

fail_task:
        kfree(worker);
        return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(-1, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
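
/*
 * A minimal worker sketch (hypothetical names, not part of this file): create
 * a dedicated worker thread, hand it previously initialized work items, and
 * tear it down with kthread_destroy_worker() when done.  KTW_FREEZABLE could
 * be passed in @flags to make the worker freezable.
 *
 *      struct kthread_worker *worker;
 *
 *      worker = kthread_create_worker(0, "my_worker");
 *      if (IS_ERR(worker))
 *              return PTR_ERR(worker);
 *      kthread_queue_work(worker, &my_work);   // an initialized kthread_work
 *      ...
 *      kthread_destroy_worker(worker);
 */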

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *      to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
                             const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(cpu, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
                                             struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);
        WARN_ON_ONCE(!list_empty(&work->node));
        /* Do not use a work with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        kthread_insert_work_sanity_check(worker, work);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * created with kthread_create_worker().  Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
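
/*
 * A minimal work-item sketch (hypothetical names, not part of this file): a
 * kthread_work is typically embedded in a driver structure, initialized with
 * kthread_init_work() from <linux/kthread.h>, and then queued on a worker.
 *
 *      struct my_device {
 *              struct kthread_work refill_work;
 *              // ...
 *      };
 *
 *      static void my_refill_fn(struct kthread_work *work)
 *      {
 *              struct my_device *dev =
 *                      container_of(work, struct my_device, refill_work);
 *              // process dev in the worker thread's context
 *      }
 *
 *      // setup:   kthread_init_work(&dev->refill_work, my_refill_fn);
 *      // produce: kthread_queue_work(worker, &dev->refill_work);
 */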

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *      delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It should be called from an irq-safe timer with irqs already disabled.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
        struct kthread_delayed_work *dwork =
                (struct kthread_delayed_work *)__data;
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;

        /*
         * This might happen when a pending work is reinitialized.
         * It means that the work is being used a wrong way.
         */
        if (WARN_ON_ONCE(!worker))
                return;

        spin_lock(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        /* Move the work from worker->delayed_work_list. */
        WARN_ON_ONCE(list_empty(&work->node));
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);

        spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
                                  struct kthread_delayed_work *dwork,
                                  unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;

        WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);

        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
         * both optimization and correctness.  The earliest @timer can
         * expire is on the closest next tick and delayed_work users depend
         * on that there's no such delay when @delay is 0.
         */
        if (!delay) {
                kthread_insert_work(worker, work, &worker->work_list);
                return;
        }

        /* Be paranoid and try to detect possible races right away. */
        kthread_insert_work_sanity_check(worker, work);

        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
        timer_stats_timer_set_start_info(&dwork->timer);
        timer->expires = jiffies + delay;
        add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *      after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay.  If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work was already pending, which means that
 * either the timer was running or the work was queued.  %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
                                struct kthread_delayed_work *dwork,
                                unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&worker->lock, flags);

        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }

        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
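
/*
 * A minimal delayed-work sketch (hypothetical names, not part of this file):
 * a kthread_delayed_work bundles a kthread_work with a timer.  It is
 * initialized with kthread_init_delayed_work() from <linux/kthread.h> and
 * queued with a delay in jiffies; kthread_mod_delayed_work() below can then
 * move the deadline.
 *
 *      static struct kthread_delayed_work my_poll_dwork;
 *
 *      // setup:
 *      //      kthread_init_delayed_work(&my_poll_dwork, my_poll_fn);
 *      // arm it to run on @worker one second from now:
 *      //      kthread_queue_delayed_work(worker, &my_poll_dwork,
 *      //                                 msecs_to_jiffies(1000));
 */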

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (!list_empty(&work->node))
                kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                kthread_insert_work(worker, &fwork.work,
                                    worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue.  Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes.  See the
 * current_work being processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *      %false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
                                  unsigned long *flags)
{
        /* Try to cancel the timer if it exists. */
        if (is_dwork) {
                struct kthread_delayed_work *dwork =
                        container_of(work, struct kthread_delayed_work, work);
                struct kthread_worker *worker = work->worker;

                /*
                 * del_timer_sync() must be called to make sure that the timer
                 * callback is not running.  The lock must be temporarily
                 * released to avoid a deadlock with the callback.  In the
                 * meantime, any queuing is blocked by setting the canceling
                 * counter.
                 */
                work->canceling++;
                spin_unlock_irqrestore(&worker->lock, *flags);
                del_timer_sync(&dwork->timer);
                spin_lock_irqsave(&worker->lock, *flags);
                work->canceling--;
        }

        /*
         * Try to remove the work from a worker list.  It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work().  Otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call.  We let the other command
 * win and return %false here.  The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
                              struct kthread_delayed_work *dwork,
                              unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        int ret = false;

        spin_lock_irqsave(&worker->lock, flags);

        /* Do not bother with canceling when never queued. */
        if (!work->worker)
                goto fast_queue;

        /* Work must not be used with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker != worker);

        /* Do not fight with another command that is canceling this work. */
        if (work->canceling)
                goto out;

        ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
out:
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
        struct kthread_worker *worker = work->worker;
        unsigned long flags;
        int ret = false;

        if (!worker)
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        ret = __kthread_cancel_work(work, is_dwork, &flags);

        if (worker->current_work != work)
                goto out_fast;

        /*
         * The work is in progress and we need to wait with the lock released.
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
        spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
        spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;

out_fast:
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself.  On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *      wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
        return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
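
/*
 * A minimal teardown sketch (hypothetical names from the sketches above): on
 * shutdown, cancel any armed delayed work and wait for a possibly running
 * instance to finish before freeing the structures it touches.
 *
 *      // kthread_cancel_delayed_work_sync(&my_poll_dwork);
 *      // kthread_cancel_work_sync(&dev->refill_work);
 *      // ... now it is safe to free @dev and destroy the worker
 */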

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
        struct task_struct *task;

        task = worker->task;
        if (WARN_ON(!task))
                return;

        kthread_flush_worker(worker);
        kthread_stop(task);
        WARN_ON(!list_empty(&worker->work_list));
        kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
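
/*
 * A minimal end-to-end lifecycle sketch (hypothetical names), pulling the
 * pieces above together.  kthread_destroy_worker() itself flushes pending
 * works before stopping the thread, so no separate flush is required here.
 *
 *      worker = kthread_create_worker(0, "my_worker");
 *      kthread_queue_work(worker, &dev->refill_work);
 *      ...
 *      kthread_destroy_worker(worker);  // flush, stop the thread, kfree(worker)
 */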