sysfs.c 7.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276
  1. /* sysfs.c: Topology sysfs support code for sparc64.
  2. *
  3. * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  4. */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
/* Per-cpu buffer the hypervisor deposits MMU statistics into.  The
 * sun4v mmustat API takes a real address, and the buffer is aligned
 * to 64 bytes to satisfy the hypervisor's alignment requirement
 * (NOTE(review): exact alignment contract per the sun4v spec — confirm).
 */
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

/* Generate a read-only sysfs show routine plus its device attribute for
 * one unsigned-long counter NAME inside struct hv_mmu_statistics.  The
 * counter is read from the per-cpu buffer of the cpu the device node
 * belongs to (dev->id).
 */
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
	return sprintf(buf, "%lu\n", p->NAME); \
} \
static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL)
/* Instruction-MMU TSB hit/tick counters: context zero, per page size. */
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
/* Instruction-MMU TSB counters: non-zero contexts, per page size. */
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
/* Data-MMU TSB counters: context zero, per page size. */
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
/* Data-MMU TSB counters: non-zero contexts, per page size. */
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
/* NULL-terminated list of every attribute generated above; order here
 * matches the declaration order so the sysfs directory listing groups
 * immu before dmmu, ctx0 before ctxnon0, small pages before large.
 */
static struct attribute *mmu_stat_attrs[] = {
	&dev_attr_immu_tsb_hits_ctx0_8k_tte.attr,
	&dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr,
	&dev_attr_immu_tsb_hits_ctx0_64k_tte.attr,
	&dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr,
	&dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr,
	&dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
	&dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr,
	&dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
	&dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
	&dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
	&dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
	&dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
	&dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
	&dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
	&dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
	&dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
	NULL,
};
/* Named group: the counters appear under a "mmu_stats" subdirectory of
 * each cpu device rather than directly in the cpu directory.
 */
static struct attribute_group mmu_stat_group = {
	.attrs = mmu_stat_attrs,
	.name = "mmu_stats",
};
  94. static long read_mmustat_enable(void *data __maybe_unused)
  95. {
  96. unsigned long ra = 0;
  97. sun4v_mmustat_info(&ra);
  98. return ra != 0;
  99. }
  100. static long write_mmustat_enable(void *data)
  101. {
  102. unsigned long ra, orig_ra, *val = data;
  103. if (*val)
  104. ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
  105. else
  106. ra = 0UL;
  107. return sun4v_mmustat_conf(ra, &orig_ra);
  108. }
  109. static ssize_t show_mmustat_enable(struct device *s,
  110. struct device_attribute *attr, char *buf)
  111. {
  112. long val = work_on_cpu(s->id, read_mmustat_enable, NULL);
  113. return sprintf(buf, "%lx\n", val);
  114. }
  115. static ssize_t store_mmustat_enable(struct device *s,
  116. struct device_attribute *attr, const char *buf,
  117. size_t count)
  118. {
  119. unsigned long val;
  120. long err;
  121. int ret;
  122. ret = sscanf(buf, "%lu", &val);
  123. if (ret != 1)
  124. return -EINVAL;
  125. err = work_on_cpu(s->id, write_mmustat_enable, &val);
  126. if (err)
  127. return -EIO;
  128. return count;
  129. }
/* Root-writable toggle for per-cpu MMU statistic collection. */
static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);

/* Set once at boot by check_mmu_stats() when the sun4v mmustat API is
 * available; gates registration of all the attributes above. */
static int mmu_stats_supported;
  132. static int register_mmu_stats(struct device *s)
  133. {
  134. if (!mmu_stats_supported)
  135. return 0;
  136. device_create_file(s, &dev_attr_mmustat_enable);
  137. return sysfs_create_group(&s->kobj, &mmu_stat_group);
  138. }
#ifdef CONFIG_HOTPLUG_CPU
/* Undo register_mmu_stats() in reverse order; only referenced from the
 * cpu-offline path, hence compiled out without hotplug support.
 */
static void unregister_mmu_stats(struct device *s)
{
	if (!mmu_stats_supported)
		return;
	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	device_remove_file(s, &dev_attr_mmustat_enable);
}
#endif
/* Generate a sysfs show routine named show_NAME that prints the
 * unsigned-long cpuinfo_sparc field MEMBER of the device's cpu. */
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

/* Same as above for unsigned-int cpuinfo_sparc fields. */
#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}
/* Per-cpu topology attributes: clock rate and cache geometry, exposing
 * the sysfs name on the left backed by the cpuinfo field on the right. */
SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
/* Read-only attribute files created directly in each cpu's sysfs
 * directory (no group); iterated by the online/offline callbacks. */
static struct device_attribute cpu_core_attrs[] = {
	__ATTR(clock_tick, 0444, show_clock_tick, NULL),
	__ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
	__ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
	__ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
	__ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
	__ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
	__ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
};
  178. static DEFINE_PER_CPU(struct cpu, cpu_devices);
  179. static int register_cpu_online(unsigned int cpu)
  180. {
  181. struct cpu *c = &per_cpu(cpu_devices, cpu);
  182. struct device *s = &c->dev;
  183. int i;
  184. for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
  185. device_create_file(s, &cpu_core_attrs[i]);
  186. register_mmu_stats(s);
  187. return 0;
  188. }
/* cpuhp offline callback: remove everything register_cpu_online()
 * added.  The body is compiled out without CONFIG_HOTPLUG_CPU because
 * cpus can never go offline in that configuration; the empty function
 * must still exist for cpuhp_setup_state().  Always returns 0.
 */
static int unregister_cpu_online(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	int i;

	unregister_mmu_stats(s);
	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		device_remove_file(s, &cpu_core_attrs[i]);
#endif
	return 0;
}
  201. static void __init check_mmu_stats(void)
  202. {
  203. unsigned long dummy1, err;
  204. if (tlb_type != hypervisor)
  205. return;
  206. err = sun4v_mmustat_info(&dummy1);
  207. if (!err)
  208. mmu_stats_supported = 1;
  209. }
/* Register every NUMA node with the driver core so per-node sysfs
 * directories exist before cpu devices are attached.  No-op on
 * non-NUMA builds.
 * NOTE(review): this registers all MAX_NUMNODES ids, not just online
 * nodes — presumably register_one_node() tolerates that; confirm.
 */
static void register_nodes(void)
{
#ifdef CONFIG_NUMA
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
#endif
}
  218. static int __init topology_init(void)
  219. {
  220. int cpu, ret;
  221. register_nodes();
  222. check_mmu_stats();
  223. for_each_possible_cpu(cpu) {
  224. struct cpu *c = &per_cpu(cpu_devices, cpu);
  225. register_cpu(c, cpu);
  226. }
  227. ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sparc/topology:online",
  228. register_cpu_online, unregister_cpu_online);
  229. WARN_ON(ret < 0);
  230. return 0;
  231. }
  232. subsys_initcall(topology_init);