numa_balancing.h

#ifndef _LINUX_SCHED_NUMA_BALANCING_H
#define _LINUX_SCHED_NUMA_BALANCING_H

/*
 * This is the interface between the scheduler and the MM that
 * implements memory access pattern based NUMA-balancing:
 */
#include <linux/sched.h>

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08
#define TNF_MIGRATE_FAIL 0x10

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
					      struct page *page,
					      int src_nid, int dst_cpu)
{
	return true;
}
#endif

#endif /* _LINUX_SCHED_NUMA_BALANCING_H */
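
/*
 * Example (illustrative only, not part of the header): a minimal sketch of
 * how an MM-side NUMA hinting fault handler might use this interface. The
 * function name example_numa_hinting_fault() and its caller-supplied
 * node/cpu/page-count arguments are hypothetical placeholders; the real
 * call sites live in mm/ (e.g. the NUMA hinting fault path).
 */
#include <linux/sched.h>
#include <linux/sched/numa_balancing.h>

static void example_numa_hinting_fault(struct task_struct *p,
				       struct page *page,
				       int last_node, int page_node,
				       int this_cpu, int nr_pages)
{
	int flags = 0;

	/*
	 * Ask the scheduler whether migrating this page toward this_cpu's
	 * node looks profitable given the task's NUMA fault statistics.
	 */
	if (should_numa_migrate_memory(p, page, page_node, this_cpu)) {
		/*
		 * Page migration would be attempted here; the caller would
		 * report TNF_MIGRATED on success or TNF_MIGRATE_FAIL if the
		 * migration could not be carried out.
		 */
		flags |= TNF_MIGRATED;
	}

	/* Feed the access back into the scheduler's NUMA fault accounting. */
	task_numa_fault(last_node, page_node, nr_pages, flags);
}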