#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
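
/*
 * Example usage (a sketch only, not part of this header): a zonelist walk
 * in the style of the page allocator filters zones behind the cheap
 * static-branch check first; "zonelist" and "highest_zoneidx" here are
 * assumed caller context, not declarations from this file:
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (cpusets_enabled() &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */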

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
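
/*
 * Typical retry loop (a sketch; alloc_attempt() is a hypothetical stand-in
 * for whatever operation depends on a stable view of mems_allowed):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = alloc_attempt(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */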

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */