sysfs.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062
  1. #include <linux/device.h>
  2. #include <linux/cpu.h>
  3. #include <linux/smp.h>
  4. #include <linux/percpu.h>
  5. #include <linux/init.h>
  6. #include <linux/sched.h>
  7. #include <linux/export.h>
  8. #include <linux/nodemask.h>
  9. #include <linux/cpumask.h>
  10. #include <linux/notifier.h>
  11. #include <asm/current.h>
  12. #include <asm/processor.h>
  13. #include <asm/cputable.h>
  14. #include <asm/hvcall.h>
  15. #include <asm/prom.h>
  16. #include <asm/machdep.h>
  17. #include <asm/smp.h>
  18. #include <asm/pmc.h>
  19. #include <asm/firmware.h>
  20. #include "cacheinfo.h"
  21. #include "setup.h"
  22. #ifdef CONFIG_PPC64
  23. #include <asm/paca.h>
  24. #include <asm/lppaca.h>
  25. #endif
/* Per-CPU 'struct cpu' backing the devices under /sys/devices/system/cpu */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * SMT snooze delay stuff, 64-bit only for now
 */

#ifdef CONFIG_PPC64

/* Time in microseconds we delay before sleeping in the idle loop */
static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
  33. static ssize_t store_smt_snooze_delay(struct device *dev,
  34. struct device_attribute *attr,
  35. const char *buf,
  36. size_t count)
  37. {
  38. struct cpu *cpu = container_of(dev, struct cpu, dev);
  39. ssize_t ret;
  40. long snooze;
  41. ret = sscanf(buf, "%ld", &snooze);
  42. if (ret != 1)
  43. return -EINVAL;
  44. per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
  45. return count;
  46. }
  47. static ssize_t show_smt_snooze_delay(struct device *dev,
  48. struct device_attribute *attr,
  49. char *buf)
  50. {
  51. struct cpu *cpu = container_of(dev, struct cpu, dev);
  52. return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
  53. }
  54. static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
  55. store_smt_snooze_delay);
  56. static int __init setup_smt_snooze_delay(char *str)
  57. {
  58. unsigned int cpu;
  59. long snooze;
  60. if (!cpu_has_feature(CPU_FTR_SMT))
  61. return 1;
  62. snooze = simple_strtol(str, NULL, 10);
  63. for_each_possible_cpu(cpu)
  64. per_cpu(smt_snooze_delay, cpu) = snooze;
  65. return 1;
  66. }
  67. __setup("smt-snooze-delay=", setup_smt_snooze_delay);
  68. #endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Highest timebase bit usable as an idle-count threshold */
#define MAX_BIT		63

/* Cached wait times (ns) last written through sysfs; 0 = not set yet */
static u64 pw20_wt;
static u64 altivec_idle_wt;

/*
 * Convert a wait time in nanoseconds into the timebase bit position
 * (ilog2 of the equivalent TB cycle count) used by the PW20/AltiVec
 * idle-count fields. Returns 0 when the time rounds to no cycles.
 */
static unsigned int get_idle_ticks_bit(u64 ns)
{
	u64 cycle;

	/* For large values round ns to the nearest us first to avoid
	 * overflowing ns * tb_ticks_per_usec below. */
	if (ns >= 10000)
		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
	else
		cycle = div_u64(ns * tb_ticks_per_usec, 1000);

	if (!cycle)
		return 0;

	return ilog2(cycle);
}
  84. static void do_show_pwrmgtcr0(void *val)
  85. {
  86. u32 *value = val;
  87. *value = mfspr(SPRN_PWRMGTCR0);
  88. }
  89. static ssize_t show_pw20_state(struct device *dev,
  90. struct device_attribute *attr, char *buf)
  91. {
  92. u32 value;
  93. unsigned int cpu = dev->id;
  94. smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
  95. value &= PWRMGTCR0_PW20_WAIT;
  96. return sprintf(buf, "%u\n", value ? 1 : 0);
  97. }
  98. static void do_store_pw20_state(void *val)
  99. {
  100. u32 *value = val;
  101. u32 pw20_state;
  102. pw20_state = mfspr(SPRN_PWRMGTCR0);
  103. if (*value)
  104. pw20_state |= PWRMGTCR0_PW20_WAIT;
  105. else
  106. pw20_state &= ~PWRMGTCR0_PW20_WAIT;
  107. mtspr(SPRN_PWRMGTCR0, pw20_state);
  108. }
  109. static ssize_t store_pw20_state(struct device *dev,
  110. struct device_attribute *attr,
  111. const char *buf, size_t count)
  112. {
  113. u32 value;
  114. unsigned int cpu = dev->id;
  115. if (kstrtou32(buf, 0, &value))
  116. return -EINVAL;
  117. if (value > 1)
  118. return -EINVAL;
  119. smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
  120. return count;
  121. }
/*
 * sysfs show: report the PW20 wait time in nanoseconds. Uses the
 * cached sysfs-written value when available, otherwise derives it
 * from the PW20_ENT bit position currently programmed in PWRMGTCR0.
 */
static ssize_t show_pw20_wait_time(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
					PWRMGTCR0_PW20_ENT_SHIFT;

		/* Bit position -> timebase cycle count threshold */
		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* Convert TB cycles to ns (tb_ticks_per_usec = ticks per 1000 ns) */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = pw20_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
  148. static void set_pw20_wait_entry_bit(void *val)
  149. {
  150. u32 *value = val;
  151. u32 pw20_idle;
  152. pw20_idle = mfspr(SPRN_PWRMGTCR0);
  153. /* Set Automatic PW20 Core Idle Count */
  154. /* clear count */
  155. pw20_idle &= ~PWRMGTCR0_PW20_ENT;
  156. /* set count */
  157. pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
  158. mtspr(SPRN_PWRMGTCR0, pw20_idle);
  159. }
  160. static ssize_t store_pw20_wait_time(struct device *dev,
  161. struct device_attribute *attr,
  162. const char *buf, size_t count)
  163. {
  164. u32 entry_bit;
  165. u64 value;
  166. unsigned int cpu = dev->id;
  167. if (kstrtou64(buf, 0, &value))
  168. return -EINVAL;
  169. if (!value)
  170. return -EINVAL;
  171. entry_bit = get_idle_ticks_bit(value);
  172. if (entry_bit > MAX_BIT)
  173. return -EINVAL;
  174. pw20_wt = value;
  175. smp_call_function_single(cpu, set_pw20_wait_entry_bit,
  176. &entry_bit, 1);
  177. return count;
  178. }
  179. static ssize_t show_altivec_idle(struct device *dev,
  180. struct device_attribute *attr, char *buf)
  181. {
  182. u32 value;
  183. unsigned int cpu = dev->id;
  184. smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
  185. value &= PWRMGTCR0_AV_IDLE_PD_EN;
  186. return sprintf(buf, "%u\n", value ? 1 : 0);
  187. }
  188. static void do_store_altivec_idle(void *val)
  189. {
  190. u32 *value = val;
  191. u32 altivec_idle;
  192. altivec_idle = mfspr(SPRN_PWRMGTCR0);
  193. if (*value)
  194. altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
  195. else
  196. altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
  197. mtspr(SPRN_PWRMGTCR0, altivec_idle);
  198. }
  199. static ssize_t store_altivec_idle(struct device *dev,
  200. struct device_attribute *attr,
  201. const char *buf, size_t count)
  202. {
  203. u32 value;
  204. unsigned int cpu = dev->id;
  205. if (kstrtou32(buf, 0, &value))
  206. return -EINVAL;
  207. if (value > 1)
  208. return -EINVAL;
  209. smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
  210. return count;
  211. }
/*
 * sysfs show: report the AltiVec idle wait time in nanoseconds. Uses
 * the cached sysfs-written value when available, otherwise derives it
 * from the AV_IDLE_CNT bit position currently in PWRMGTCR0.
 */
static ssize_t show_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!altivec_idle_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
					PWRMGTCR0_AV_IDLE_CNT_SHIFT;

		/* Bit position -> timebase cycle count threshold */
		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* Convert TB cycles to ns (tb_ticks_per_usec = ticks per 1000 ns) */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = altivec_idle_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
  238. static void set_altivec_idle_wait_entry_bit(void *val)
  239. {
  240. u32 *value = val;
  241. u32 altivec_idle;
  242. altivec_idle = mfspr(SPRN_PWRMGTCR0);
  243. /* Set Automatic AltiVec Idle Count */
  244. /* clear count */
  245. altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
  246. /* set count */
  247. altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
  248. mtspr(SPRN_PWRMGTCR0, altivec_idle);
  249. }
  250. static ssize_t store_altivec_idle_wait_time(struct device *dev,
  251. struct device_attribute *attr,
  252. const char *buf, size_t count)
  253. {
  254. u32 entry_bit;
  255. u64 value;
  256. unsigned int cpu = dev->id;
  257. if (kstrtou64(buf, 0, &value))
  258. return -EINVAL;
  259. if (!value)
  260. return -EINVAL;
  261. entry_bit = get_idle_ticks_bit(value);
  262. if (entry_bit > MAX_BIT)
  263. return -EINVAL;
  264. altivec_idle_wt = value;
  265. smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
  266. &entry_bit, 1);
  267. return count;
  268. }
/*
 * Enable/Disable interface:
 * 0, disable. 1, enable.
 */
static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);

/*
 * Set wait time interface:(Nanosecond)
 * Example: Base on TBfreq is 41MHZ.
 * 1~48(ns): TB[63]
 * 49~97(ns): TB[62]
 * 98~195(ns): TB[61]
 * 196~390(ns): TB[60]
 * 391~780(ns): TB[59]
 * 781~1560(ns): TB[58]
 * ...
 */
static DEVICE_ATTR(pw20_wait_time, 0600,
			show_pw20_wait_time,
			store_pw20_wait_time);
static DEVICE_ATTR(altivec_idle_wait_time, 0600,
			show_altivec_idle_wait_time,
			store_altivec_idle_wait_time);
#endif /* CONFIG_PPC_FSL_BOOK3E */
/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */
static DEFINE_PER_CPU(char, pmcs_enabled);

/*
 * Mark the PMU in use and, once per CPU, run any platform hook
 * needed to make the performance monitor counters accessible.
 */
void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__this_cpu_read(pmcs_enabled))
		return;

	__this_cpu_write(pmcs_enabled, 1);

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);
/*
 * Macro machinery generating per-SPR sysfs plumbing. The read_/write_
 * helpers run on the target CPU via smp_call_function_single() so the
 * SPR access happens on the CPU whose sysfs file was touched; EXTRA is
 * an optional statement executed before the write (e.g. ppc_enable_pmcs()).
 */
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS);	\
} \
static void write_##NAME(void *val) \
{ \
	EXTRA; \
	mtspr(ADDRESS, *(unsigned long *)val);	\
}

#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}

/* PMC SPRs: also flag the PMU in use on first write. */
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

/* Plain SPRs: nothing extra on write. */
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

/* For SPRs with hand-written read_/write_ helpers (e.g. dscr below). */
#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
/* Let's define all possible registers, we'll only hook up the ones
 * that are implemented on the current processor
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif
#ifdef HAS_PPC_PMC_CLASSIC
/* Classic PMU registers shared by IBM and G4 style cores */
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);

SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
SYSFS_SPRSETUP(tscr, SPRN_TSCR);

/*
 * Lets only enable read for phyp resources and
 * enable write when needed with a separate function.
 * Lets be conservative and default to pseries.
 */
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
/* purr starts read-only; writes are enabled on bare metal only, see
 * add_write_permission_dev_attr() in register_cpu_online(). */
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
/*
 * This is the system wide DSCR register default value. Any
 * change to this default value through the sysfs interface
 * will update all per cpu DSCR default values across the
 * system stored in their respective PACA structures.
 */
static unsigned long dscr_default;

/**
 * read_dscr() - Fetch the cpu specific DSCR default
 * @val: Returned cpu specific DSCR default value
 *
 * This function returns the per cpu DSCR default value
 * for any cpu which is contained in it's PACA structure.
 */
static void read_dscr(void *val)
{
	*(unsigned long *)val = get_paca()->dscr_default;
}
/**
 * write_dscr() - Update the cpu specific DSCR default
 * @val: New cpu specific DSCR default value to update
 *
 * This function updates the per cpu DSCR default value
 * for any cpu which is contained in it's PACA structure.
 */
static void write_dscr(void *val)
{
	get_paca()->dscr_default = *(unsigned long *)val;
	/*
	 * Threads that have not set their own DSCR follow the system
	 * default, so push the new value into the current thread state
	 * and the SPR as well.
	 */
	if (!current->thread.dscr_inherit) {
		current->thread.dscr = *(unsigned long *)val;
		mtspr(SPRN_DSCR, *(unsigned long *)val);
	}
}

/* Generates show_dscr/store_dscr on top of read_dscr/write_dscr above */
SYSFS_SPRSETUP_SHOW_STORE(dscr);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
/* Add owner-write (0200) to an attribute registered read-only by default */
static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}

/**
 * show_dscr_default() - Fetch the system wide DSCR default
 * @dev: Device structure
 * @attr: Device attribute structure
 * @buf: Interface buffer
 *
 * This function returns the system wide DSCR default value.
 */
static ssize_t show_dscr_default(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}
  443. /**
  444. * store_dscr_default() - Update the system wide DSCR default
  445. * @dev: Device structure
  446. * @attr: Device attribute structure
  447. * @buf: Interface buffer
  448. * @count: Size of the update
  449. *
  450. * This function updates the system wide DSCR default value.
  451. */
  452. static ssize_t __used store_dscr_default(struct device *dev,
  453. struct device_attribute *attr, const char *buf,
  454. size_t count)
  455. {
  456. unsigned long val;
  457. int ret = 0;
  458. ret = sscanf(buf, "%lx", &val);
  459. if (ret != 1)
  460. return -EINVAL;
  461. dscr_default = val;
  462. on_each_cpu(write_dscr, &val, 1);
  463. return count;
  464. }
  465. static DEVICE_ATTR(dscr_default, 0600,
  466. show_dscr_default, store_dscr_default);
  467. static void sysfs_create_dscr_default(void)
  468. {
  469. if (cpu_has_feature(CPU_FTR_DSCR)) {
  470. int err = 0;
  471. int cpu;
  472. dscr_default = spr_default_dscr;
  473. for_each_possible_cpu(cpu)
  474. paca_ptrs[cpu]->dscr_default = dscr_default;
  475. err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
  476. }
  477. }
  478. #endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
/* PA Semi PA6T PMU counters (numbered from PMC0) */
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);

#ifdef CONFIG_DEBUG_KERNEL
/* Debug-only access to PA6T implementation registers */
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
SYSFS_SPRSETUP(hid5, SPRN_HID5);
SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */
#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */

#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */

/* Only the first cur_cpu_spec->num_pmcs of these get registered */
static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
#ifdef HAS_PPC_PMC_PA6T
/* PA6T exposes both common MMCRs and its own PMC0-based counters */
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
/*
 * CPU hotplug "online" callback: create the per-CPU sysfs attributes
 * (PMCs, feature-dependent SPRs, FSL power-management files) for @cpu.
 * Mirrored by unregister_cpu_online() below — keep the two in sync.
 */
static int register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* For cpus present at boot a reference was already grabbed in register_cpu() */
	if (!s->of_node)
		s->of_node = of_get_cpu_node(cpu, NULL);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR)) {
		/* purr is writable on bare metal only */
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
	}

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_create_file(s, &dev_attr_pw20_state);
		device_create_file(s, &dev_attr_pw20_wait_time);

		device_create_file(s, &dev_attr_altivec_idle);
		device_create_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_online(cpu);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug "offline" callback: remove exactly the attributes that
 * register_cpu_online() created, then drop the of_node reference.
 */
static int unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* Offline callbacks only run for hotpluggable CPUs */
	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_remove_file(s, &dev_attr_pw20_state);
		device_remove_file(s, &dev_attr_pw20_wait_time);

		device_remove_file(s, &dev_attr_altivec_idle);
		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_offline(cpu);
	/* Release the reference taken in register_cpu_online() */
	of_node_put(s->of_node);
	s->of_node = NULL;
	return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
#define unregister_cpu_online NULL
#endif
  738. #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
  739. ssize_t arch_cpu_probe(const char *buf, size_t count)
  740. {
  741. if (ppc_md.cpu_probe)
  742. return ppc_md.cpu_probe(buf, count);
  743. return -EINVAL;
  744. }
  745. ssize_t arch_cpu_release(const char *buf, size_t count)
  746. {
  747. if (ppc_md.cpu_release)
  748. return ppc_md.cpu_release(buf, count);
  749. return -EINVAL;
  750. }
  751. #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
  752. static DEFINE_MUTEX(cpu_mutex);
  753. int cpu_add_dev_attr(struct device_attribute *attr)
  754. {
  755. int cpu;
  756. mutex_lock(&cpu_mutex);
  757. for_each_possible_cpu(cpu) {
  758. device_create_file(get_cpu_device(cpu), attr);
  759. }
  760. mutex_unlock(&cpu_mutex);
  761. return 0;
  762. }
  763. EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
  764. int cpu_add_dev_attr_group(struct attribute_group *attrs)
  765. {
  766. int cpu;
  767. struct device *dev;
  768. int ret;
  769. mutex_lock(&cpu_mutex);
  770. for_each_possible_cpu(cpu) {
  771. dev = get_cpu_device(cpu);
  772. ret = sysfs_create_group(&dev->kobj, attrs);
  773. WARN_ON(ret != 0);
  774. }
  775. mutex_unlock(&cpu_mutex);
  776. return 0;
  777. }
  778. EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
  779. void cpu_remove_dev_attr(struct device_attribute *attr)
  780. {
  781. int cpu;
  782. mutex_lock(&cpu_mutex);
  783. for_each_possible_cpu(cpu) {
  784. device_remove_file(get_cpu_device(cpu), attr);
  785. }
  786. mutex_unlock(&cpu_mutex);
  787. }
  788. EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
  789. void cpu_remove_dev_attr_group(struct attribute_group *attrs)
  790. {
  791. int cpu;
  792. struct device *dev;
  793. mutex_lock(&cpu_mutex);
  794. for_each_possible_cpu(cpu) {
  795. dev = get_cpu_device(cpu);
  796. sysfs_remove_group(&dev->kobj, attrs);
  797. }
  798. mutex_unlock(&cpu_mutex);
  799. }
  800. EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */

#ifdef CONFIG_NUMA
/* Register a sysfs node device for every possible NUMA node id. */
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}

/* Link @dev into /sys/devices/system/node/node<nid>/ */
int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
			kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);

/* Remove the link created by sysfs_add_device_to_node() */
void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);

#else
static void register_nodes(void)
{
	return;
}
#endif
  828. /* Only valid if CPU is present. */
  829. static ssize_t show_physical_id(struct device *dev,
  830. struct device_attribute *attr, char *buf)
  831. {
  832. struct cpu *cpu = container_of(dev, struct cpu, dev);
  833. return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
  834. }
  835. static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
/*
 * Subsystem init: register node and CPU devices, hook the hotplug
 * online/offline callbacks, and (on ppc64) expose dscr_default.
 */
static int __init topology_init(void)
{
	int cpu, r;

	register_nodes();

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug. But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}
	}

	r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
			      register_cpu_online, unregister_cpu_online);
	WARN_ON(r < 0);
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);