/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	struct completion exited;
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)
/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);
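
/*
 * Example (illustrative sketch, not part of the original file): the
 * canonical shape of a @threadfn that cooperates with kthread_stop().
 * Whatever it returns is handed back to the kthread_stop() caller.
 * my_threadfn and my_do_work() are hypothetical names.  Re-checking
 * kthread_should_stop() after setting the task state avoids missing
 * the wakeup issued by kthread_stop().
 *
 *	static int my_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			my_do_work(data);
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			if (!kthread_should_stop())
 *				schedule();
 *			__set_current_state(TASK_RUNNING);
 *		}
 *		return 0;
 *	}
 */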
static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
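
/*
 * Example (illustrative sketch, not part of the original file): create
 * a named thread and start it explicitly; the kthread_run() macro in
 * <linux/kthread.h> performs both steps in one call.  my_threadfn is
 * the hypothetical thread function from the sketch above.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(my_threadfn, NULL, "mydrv/%d", 0);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */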
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	p->cpus_allowed = cpumask_of_cpu(cpu);
	p->rt.nr_cpus_allowed = 1;
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
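
/*
 * Example (illustrative sketch, not part of the original file): pin a
 * freshly created, still-stopped thread to one CPU before waking it,
 * as per-cpu helpers do.  my_threadfn and cpu are hypothetical.
 *
 *	tsk = kthread_create(my_threadfn, NULL, "mydrv/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */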
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
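
/*
 * Example (illustrative sketch, not part of the original file): tear
 * down the thread started above and collect my_threadfn()'s exit code.
 * A return of -EINTR here means the thread was never woken, so
 * my_threadfn() never ran at all.
 *
 *	int ret = kthread_stop(tsk);
 *	WARN_ON(ret && ret != -EINTR);
 */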
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call this
 * function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
		work->done_seq = work->queue_seq;
		smp_mb();	/* mb worker-b1 paired with flush-b0 */
		if (atomic_read(&work->flushing))
			wake_up_all(&work->done);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
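
/*
 * Example (illustrative sketch, not part of the original file): attach
 * a thread to a worker and feed it work.  my_work_fn(struct
 * kthread_work *) is a hypothetical handler; DEFINE_KTHREAD_WORKER()
 * and DEFINE_KTHREAD_WORK() are the companion initializers from
 * <linux/kthread.h>.
 *
 *	static DEFINE_KTHREAD_WORKER(my_worker);
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *	tsk = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (!IS_ERR(tsk))
 *		queue_kthread_work(&my_worker, &my_work);
 */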
/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * initialized (e.g. with init_kthread_worker()).  Returns %true if
 * @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &worker->work_list);
		work->queue_seq++;
		if (likely(worker->task))
			wake_up_process(worker->task);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	int seq = work->queue_seq;

	atomic_inc(&work->flushing);

	/*
	 * mb flush-b0 paired with worker-b1, to make sure either
	 * worker sees the above increment or we see done_seq update.
	 */
	smp_mb__after_atomic_inc();

	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
	wait_event(work->done, seq - work->done_seq <= 0);
	atomic_dec(&work->flushing);

	/*
	 * rmb flush-b1 paired with worker-b0, to make sure our caller
	 * sees every change made by work->func().
	 */
	smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
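
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical teardown for the worker set up above: drain every queued
 * work, then stop the servicing thread.
 *
 *	flush_kthread_worker(&my_worker);
 *	kthread_stop(tsk);
 */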