membarrier.c

/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/syscalls.h>
#include <linux/membarrier.h>
#include <linux/tick.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>

#include "sched.h"	/* for cpu_rq(). */

/*
 * Bitmask made from a "or" of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#define MEMBARRIER_CMD_BITMASK \
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}
static int membarrier_global_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
				   MEMBARRIER_STATE_GLOBAL_EXPEDITED)) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}
static int membarrier_private_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (!(atomic_read(&current->mm->membarrier_state)
			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
		return -EPERM;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm == current->mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}
static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	if (atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1) {
		/*
		 * For single mm user, single threaded process, we can
		 * simply issue a memory barrier after setting
		 * MEMBARRIER_STATE_GLOBAL_EXPEDITED to guarantee that
		 * no memory access following registration is reordered
		 * before registration.
		 */
		smp_mb();
	} else {
		/*
		 * For multi-mm user threads, we need to ensure all
		 * future scheduler executions will observe the new
		 * thread flag state for this mm.
		 */
		synchronize_sched();
	}
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);
	return 0;
}

static int membarrier_register_private_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if (atomic_read(&mm->membarrier_state)
			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
	if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
		/*
		 * Ensure all future scheduler executions will observe the
		 * new thread flag state for this process.
		 */
		synchronize_sched();
	}
	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
		  &mm->membarrier_state);
	return 0;
}
/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb()   sys_membarrier()
 *        barrier()          X           X              O
 *        smp_mb()           X           O              O
 *        sys_membarrier()   O           O              O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_sched();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited();
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited();
	default:
		return -EINVAL;
	}
}
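
For reference, below is a minimal userspace sketch of how the commands implemented above are typically exercised: query the supported command mask, register the calling process for private expedited membarrier (without which membarrier_private_expedited() above returns -EPERM), then issue the barrier. This example is not part of the kernel file; it assumes UAPI headers matching a kernel that provides these commands, and it invokes the system call through syscall(2) because glibc supplies no wrapper. The membarrier_syscall() helper is purely illustrative.

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

/* Illustrative wrapper: membarrier(2) has no glibc wrapper. */
static int membarrier_syscall(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	int mask;

	/* MEMBARRIER_CMD_QUERY returns the bitmask of supported commands. */
	mask = membarrier_syscall(MEMBARRIER_CMD_QUERY, 0);
	if (mask < 0 || !(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED)) {
		fprintf(stderr, "private expedited membarrier not supported\n");
		return 1;
	}

	/* Registration must precede use; otherwise the kernel returns -EPERM. */
	if (membarrier_syscall(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) != 0)
		return 1;

	/* IPIs every other CPU currently running a thread of this process. */
	if (membarrier_syscall(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) != 0)
		return 1;

	return 0;
}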