sysfs.c

#include <linux/config.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/systemcfg.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/machdep.h>

static DEFINE_PER_CPU(struct cpu, cpu_devices);

/* SMT stuff */

#ifdef CONFIG_PPC_MULTIPLATFORM
/* default to snooze disabled */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay);

static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
                                      size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);
        ssize_t ret;
        unsigned long snooze;

        ret = sscanf(buf, "%lu", &snooze);
        if (ret != 1)
                return -EINVAL;

        per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;

        return count;
}

static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);

        return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
}

static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
                   store_smt_snooze_delay);
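
/*
 * Note: with the sysdev layout used here, this attribute should appear as
 * something like /sys/devices/system/cpu/cpuN/smt_snooze_delay; writing a
 * decimal value sets that CPU's snooze delay, reading returns the current
 * value.  (Path given for orientation only.)
 */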

/* Only parse OF options if the matching cmdline option was not specified */
static int smt_snooze_cmdline;

static int __init smt_setup(void)
{
        struct device_node *options;
        unsigned int *val;
        unsigned int cpu;

        if (!cpu_has_feature(CPU_FTR_SMT))
                return 1;

        options = find_path_device("/options");
        if (!options)
                return 1;

        val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
                                           NULL);
        if (!smt_snooze_cmdline && val) {
                for_each_cpu(cpu)
                        per_cpu(smt_snooze_delay, cpu) = *val;
        }

        return 1;
}
__initcall(smt_setup);

static int __init setup_smt_snooze_delay(char *str)
{
        unsigned int cpu;
        int snooze;

        if (!cpu_has_feature(CPU_FTR_SMT))
                return 1;

        smt_snooze_cmdline = 1;

        if (get_option(&str, &snooze)) {
                for_each_cpu(cpu)
                        per_cpu(smt_snooze_delay, cpu) = snooze;
        }

        return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
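
/*
 * For illustration (the value is arbitrary): booting with
 * "smt-snooze-delay=100" on the kernel command line seeds every CPU's
 * smt_snooze_delay with 100 and, via smt_snooze_cmdline above, stops
 * smt_setup() from later overriding it with the firmware-provided
 * ibm,smt-snooze-delay property.
 */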

/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */

static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc64_enable_pmcs(void)
{
        unsigned long hid0;
#ifdef CONFIG_PPC_PSERIES
        unsigned long set, reset;
        int ret;
        unsigned int ctrl;
#endif /* CONFIG_PPC_PSERIES */

        /* Only need to enable them once */
        if (__get_cpu_var(pmcs_enabled))
                return;

        __get_cpu_var(pmcs_enabled) = 1;

        switch (systemcfg->platform) {
        case PLATFORM_PSERIES:
        case PLATFORM_POWERMAC:
                hid0 = mfspr(HID0);
                hid0 |= 1UL << (63 - 20);

                /* POWER4 requires the following sequence */
                asm volatile(
                        "sync\n"
                        "mtspr %1, %0\n"
                        "mfspr %0, %1\n"
                        "mfspr %0, %1\n"
                        "mfspr %0, %1\n"
                        "mfspr %0, %1\n"
                        "mfspr %0, %1\n"
                        "mfspr %0, %1\n"
                        "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
                        "memory");
                break;

#ifdef CONFIG_PPC_PSERIES
        case PLATFORM_PSERIES_LPAR:
                set = 1UL << 63;
                reset = 0;
                ret = plpar_hcall_norets(H_PERFMON, set, reset);
                if (ret)
                        printk(KERN_ERR "H_PERFMON call on cpu %u "
                               "returned %d\n",
                               smp_processor_id(), ret);
                break;
#endif /* CONFIG_PPC_PSERIES */

        default:
                break;
        }

#ifdef CONFIG_PPC_PSERIES
        /* instruct hypervisor to maintain PMCs */
        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
                get_paca()->lppaca.pmcregs_in_use = 1;

        /*
         * On SMT machines we have to set the run latch in the ctrl register
         * in order to make PMC6 spin.
         */
        if (cpu_has_feature(CPU_FTR_SMT)) {
                ctrl = mfspr(CTRLF);
                ctrl |= RUNLATCH;
                mtspr(CTRLT, ctrl);
        }
#endif /* CONFIG_PPC_PSERIES */
}

#else

/* PMC stuff */
void ppc64_enable_pmcs(void)
{
        /* XXX Implement for iseries */
}
#endif /* CONFIG_PPC_MULTIPLATFORM */

EXPORT_SYMBOL(ppc64_enable_pmcs);

/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
                                unsigned long (*func)(unsigned long),
                                unsigned long arg)
{
        cpumask_t old_affinity = current->cpus_allowed;
        unsigned long ret;

        /* should return -EINVAL to userspace */
        if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
                return 0;

        ret = func(arg);

        set_cpus_allowed(current, old_affinity);

        return ret;
}
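
/*
 * run_on_cpu() works by temporarily pinning the calling task to the target
 * CPU with set_cpus_allowed(), calling func(arg) there, and then restoring
 * the saved affinity mask.  The SPR accessors generated below rely on this
 * so that mfspr/mtspr touch the registers of the CPU whose sysfs file was
 * accessed, not whichever CPU the calling task happened to be running on.
 */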

#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static unsigned long read_##NAME(unsigned long junk) \
{ \
        return mfspr(ADDRESS); \
} \
static unsigned long write_##NAME(unsigned long val) \
{ \
        ppc64_enable_pmcs(); \
        mtspr(ADDRESS, val); \
        return 0; \
} \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
        struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
        unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
        return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __attribute_used__ \
        store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
{ \
        struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
        unsigned long val; \
        int ret = sscanf(buf, "%lx", &val); \
        if (ret != 1) \
                return -EINVAL; \
        run_on_cpu(cpu->sysdev.id, write_##NAME, val); \
        return count; \
}
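
/*
 * For reference, each invocation below stamps out four functions; e.g.
 * SYSFS_PMCSETUP(pmc1, SPRN_PMC1) expands (roughly) to:
 *
 *	static unsigned long read_pmc1(unsigned long junk)
 *	{ return mfspr(SPRN_PMC1); }
 *	static unsigned long write_pmc1(unsigned long val)
 *	{ ppc64_enable_pmcs(); mtspr(SPRN_PMC1, val); return 0; }
 *	static ssize_t show_pmc1(struct sys_device *dev, char *buf)
 *	{ ... run_on_cpu(cpu->sysdev.id, read_pmc1, 0) ... }
 *	static ssize_t store_pmc1(struct sys_device *dev, const char *buf,
 *				  size_t count)
 *	{ ... run_on_cpu(cpu->sysdev.id, write_pmc1, val) ... }
 *
 * i.e. a raw SPR reader/writer plus sysfs show/store wrappers that perform
 * the access on the CPU that owns the attribute.
 */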

SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);

static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1);
static SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2);
static SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3);
static SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4);
static SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5);
static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
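
/*
 * All of these SPR attributes are 0600 (owner, i.e. root, only) since they
 * poke hardware registers directly; purr has no store handler (NULL), so
 * writes fail even though the mode allows them.  The files are created and
 * removed per-CPU by register_cpu_online()/unregister_cpu_online() below.
 */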

static void register_cpu_online(unsigned int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;

#ifndef CONFIG_PPC_ISERIES
        if (cpu_has_feature(CPU_FTR_SMT))
                sysdev_create_file(s, &attr_smt_snooze_delay);
#endif

        /* PMC stuff */
        sysdev_create_file(s, &attr_mmcr0);
        sysdev_create_file(s, &attr_mmcr1);

        if (cpu_has_feature(CPU_FTR_MMCRA))
                sysdev_create_file(s, &attr_mmcra);

        sysdev_create_file(s, &attr_pmc1);
        sysdev_create_file(s, &attr_pmc2);
        sysdev_create_file(s, &attr_pmc3);
        sysdev_create_file(s, &attr_pmc4);
        sysdev_create_file(s, &attr_pmc5);
        sysdev_create_file(s, &attr_pmc6);

        if (cpu_has_feature(CPU_FTR_PMC8)) {
                sysdev_create_file(s, &attr_pmc7);
                sysdev_create_file(s, &attr_pmc8);
        }

        if (cpu_has_feature(CPU_FTR_SMT))
                sysdev_create_file(s, &attr_purr);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;

        BUG_ON(c->no_control);

#ifndef CONFIG_PPC_ISERIES
        if (cpu_has_feature(CPU_FTR_SMT))
                sysdev_remove_file(s, &attr_smt_snooze_delay);
#endif

        /* PMC stuff */
        sysdev_remove_file(s, &attr_mmcr0);
        sysdev_remove_file(s, &attr_mmcr1);

        if (cpu_has_feature(CPU_FTR_MMCRA))
                sysdev_remove_file(s, &attr_mmcra);

        sysdev_remove_file(s, &attr_pmc1);
        sysdev_remove_file(s, &attr_pmc2);
        sysdev_remove_file(s, &attr_pmc3);
        sysdev_remove_file(s, &attr_pmc4);
        sysdev_remove_file(s, &attr_pmc5);
        sysdev_remove_file(s, &attr_pmc6);

        if (cpu_has_feature(CPU_FTR_PMC8)) {
                sysdev_remove_file(s, &attr_pmc7);
                sysdev_remove_file(s, &attr_pmc8);
        }

        if (cpu_has_feature(CPU_FTR_SMT))
                sysdev_remove_file(s, &attr_purr);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit sysfs_cpu_notify(struct notifier_block *self,
                                      unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                register_cpu_online(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                unregister_cpu_online(cpu);
                break;
#endif
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata sysfs_cpu_nb = {
        .notifier_call = sysfs_cpu_notify,
};
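
/*
 * The notifier above keeps per-CPU sysfs state in step with hotplug: it is
 * registered from topology_init() below, adds the attribute files when a
 * CPU comes online (CPU_ONLINE) and, on hotplug-capable kernels, removes
 * them again once the CPU is dead (CPU_DEAD).
 */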

/* NUMA stuff */

#ifdef CONFIG_NUMA
static struct node node_devices[MAX_NUMNODES];

static void register_nodes(void)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; i++) {
                if (node_online(i)) {
                        int p_node = parent_node(i);
                        struct node *parent = NULL;

                        if (p_node != i)
                                parent = &node_devices[p_node];

                        register_node(&node_devices[i], i, parent);
                }
        }
}
#else
static void register_nodes(void)
{
        return;
}
#endif

/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);

        return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
}
static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);

static int __init topology_init(void)
{
        int cpu;
        struct node *parent = NULL;

        register_nodes();

        register_cpu_notifier(&sysfs_cpu_nb);

        for_each_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
                parent = &node_devices[cpu_to_node(cpu)];
#endif
                /*
                 * For now, we just see if the system supports making
                 * the RTAS calls for CPU hotplug. But, there may be a
                 * more comprehensive way to do this for an individual
                 * CPU. For instance, the boot cpu might never be valid
                 * for hotplugging.
                 */
                if (!ppc_md.cpu_die)
                        c->no_control = 1;

                if (cpu_online(cpu) || (c->no_control == 0)) {
                        register_cpu(c, cpu, parent);
                        sysdev_create_file(&c->sysdev, &attr_physical_id);
                }

                if (cpu_online(cpu))
                        register_cpu_online(cpu);
        }

        return 0;
}
__initcall(topology_init);