/* arch/mips/kernel/mips-mt-fpaff.c */
  1. /*
  2. * General MIPS MT support routines, usable in AP/SP and SMVP.
  3. * Copyright (C) 2005 Mips Technologies, Inc
  4. */
  5. #include <linux/cpu.h>
  6. #include <linux/cpuset.h>
  7. #include <linux/cpumask.h>
  8. #include <linux/delay.h>
  9. #include <linux/kernel.h>
  10. #include <linux/init.h>
  11. #include <linux/sched.h>
  12. #include <linux/sched/task.h>
  13. #include <linux/cred.h>
  14. #include <linux/security.h>
  15. #include <linux/types.h>
  16. #include <linux/uaccess.h>
/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

/*
 * Boot-time override parsed from the "fpaff=" command-line option by
 * fpaff_thresh(); -1 means "not given, derive a default at init time".
 */
static int fpaff_threshold = -1;

/*
 * FPU-emulation count threshold; set once in mt_fp_affinity_init(),
 * either from fpaff_threshold or scaled from loops_per_jiffy.
 */
unsigned long mt_fpemul_threshold;
  23. /*
  24. * Replacement functions for the sys_sched_setaffinity() and
  25. * sys_sched_getaffinity() system calls, so that we can integrate
  26. * FPU affinity with the user's requested processor affinity.
  27. * This code is 98% identical with the sys_sched_setaffinity()
  28. * and sys_sched_getaffinity() system calls, and should be
  29. * updated when kernel/sched/core.c changes.
  30. */
  31. /*
  32. * find_process_by_pid - find a process with a matching PID value.
  33. * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
  34. * cloned here.
  35. */
  36. static inline struct task_struct *find_process_by_pid(pid_t pid)
  37. {
  38. return pid ? find_task_by_vpid(pid) : current;
  39. }
  40. /*
  41. * check the target process has a UID that matches the current process's
  42. */
  43. static bool check_same_owner(struct task_struct *p)
  44. {
  45. const struct cred *cred = current_cred(), *pcred;
  46. bool match;
  47. rcu_read_lock();
  48. pcred = __task_cred(p);
  49. match = (uid_eq(cred->euid, pcred->euid) ||
  50. uid_eq(cred->euid, pcred->uid));
  51. rcu_read_unlock();
  52. return match;
  53. }
  54. /*
  55. * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
  56. */
/*
 * Set the CPU affinity of process @pid from the user-supplied mask,
 * additionally restricting FPU-bound tasks (TIF_FPUBOUND) to the
 * FPU-capable CPU set in mt_fpu_cpumask.
 *
 * Returns 0 on success, or -EINVAL / -EFAULT / -ESRCH / -ENOMEM /
 * -EPERM (or an error from security/set_cpus_allowed_ptr) on failure.
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	/* User buffer must be at least as large as the kernel's mask. */
	if (len < sizeof(new_mask))
		return -EINVAL;

	/*
	 * NOTE(review): the user mask is copied into &new_mask *before*
	 * alloc_cpumask_var() below.  This is only sound if cpumask_var_t
	 * is the on-stack array flavour (CONFIG_CPUMASK_OFFSTACK unset),
	 * where the later alloc is a no-op — confirm that assumption holds
	 * for this architecture before touching this ordering.
	 */
	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	/* Hold off CPU hotplug while affinity is being recomputed. */
	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}

	/* Only the owner or a CAP_SYS_NICE-capable task may change affinity. */
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
		retval = -EPERM;
		goto out_unlock;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
		/*
		 * Task is FPU-bound and the request overlaps the FPU set:
		 * restrict the effective mask to FPU-capable CPUs.
		 */
		cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		/* No FPU restriction applies; drop the bound flag. */
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}

	/* Unwind in strict reverse order of acquisition. */
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
  135. /*
  136. * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
  137. */
  138. asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
  139. unsigned long __user *user_mask_ptr)
  140. {
  141. unsigned int real_len;
  142. cpumask_t allowed, mask;
  143. int retval;
  144. struct task_struct *p;
  145. real_len = sizeof(mask);
  146. if (len < real_len)
  147. return -EINVAL;
  148. get_online_cpus();
  149. read_lock(&tasklist_lock);
  150. retval = -ESRCH;
  151. p = find_process_by_pid(pid);
  152. if (!p)
  153. goto out_unlock;
  154. retval = security_task_getscheduler(p);
  155. if (retval)
  156. goto out_unlock;
  157. cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
  158. cpumask_and(&mask, &allowed, cpu_active_mask);
  159. out_unlock:
  160. read_unlock(&tasklist_lock);
  161. put_online_cpus();
  162. if (retval)
  163. return retval;
  164. if (copy_to_user(user_mask_ptr, &mask, real_len))
  165. return -EFAULT;
  166. return real_len;
  167. }
  168. static int __init fpaff_thresh(char *str)
  169. {
  170. get_option(&str, &fpaff_threshold);
  171. return 1;
  172. }
  173. __setup("fpaff=", fpaff_thresh);
  174. /*
  175. * FPU Use Factor empirically derived from experiments on 34K
  176. */
  177. #define FPUSEFACTOR 2000
  178. static __init int mt_fp_affinity_init(void)
  179. {
  180. if (fpaff_threshold >= 0) {
  181. mt_fpemul_threshold = fpaff_threshold;
  182. } else {
  183. mt_fpemul_threshold =
  184. (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
  185. }
  186. printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
  187. mt_fpemul_threshold);
  188. return 0;
  189. }
  190. arch_initcall(mt_fp_affinity_init);