cpuset.h

#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS

extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}
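/*
 * Worked example (illustrative): each non-root cpuset holds one reference
 * on cpusets_enabled_key via cpuset_inc()/cpuset_dec() below, so with two
 * child cpusets created, static_key_count() returns 2 and nr_cpusets()
 * returns 3.
 */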
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
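/*
 * Illustrative sketch (not part of this header): allocator-style code is
 * expected to filter candidate zones through cpuset_zone_allowed() before
 * trying to allocate from them, e.g.:
 *
 *	struct zone *zone;
 *
 *	for_each_zone(zone) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		...try to allocate from this zone...
 *	}
 *
 * The cpusets_enabled() check inside cpuset_zone_allowed() keeps this a
 * cheap static-branch fast path when no cpusets are in use.
 */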
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}
/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
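/*
 * Illustrative sketch (not part of this header): callers pair the two
 * helpers in a retry loop so that an allocation that raced with a
 * mems_allowed update is retried rather than failed. The allocation
 * helper named below is hypothetical.
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_from_mems_allowed(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */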
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	/*
	 * task_lock() serializes updates to current->mems_allowed.
	 * Interrupts are disabled across the seqcount write section so
	 * that a reader in interrupt context on this CPU cannot spin in
	 * read_seqcount_begin() against a half-completed update.
	 */
	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
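/*
 * Example caller (sketch): a kernel thread that must be able to allocate
 * from any node can widen its mask to every node that has memory:
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 */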
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */