/*
 * sysfs.c — powerpc cpu sysfs attributes (SMT snooze delay, PMC/SPR
 * accessors, FSL e6500 power-management knobs, NUMA node links).
 */
  1. #include <linux/device.h>
  2. #include <linux/cpu.h>
  3. #include <linux/smp.h>
  4. #include <linux/percpu.h>
  5. #include <linux/init.h>
  6. #include <linux/sched.h>
  7. #include <linux/export.h>
  8. #include <linux/nodemask.h>
  9. #include <linux/cpumask.h>
  10. #include <linux/notifier.h>
  11. #include <asm/current.h>
  12. #include <asm/processor.h>
  13. #include <asm/cputable.h>
  14. #include <asm/hvcall.h>
  15. #include <asm/prom.h>
  16. #include <asm/machdep.h>
  17. #include <asm/smp.h>
  18. #include <asm/pmc.h>
  19. #include <asm/firmware.h>
  20. #include "cacheinfo.h"
  21. #ifdef CONFIG_PPC64
  22. #include <asm/paca.h>
  23. #include <asm/lppaca.h>
  24. #endif
/* Per-cpu device structures registered with the driver core in topology_init() */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * SMT snooze delay stuff, 64-bit only for now
 */

#ifdef CONFIG_PPC64

/* Time in microseconds we delay before sleeping in the idle loop */
static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
  32. static ssize_t store_smt_snooze_delay(struct device *dev,
  33. struct device_attribute *attr,
  34. const char *buf,
  35. size_t count)
  36. {
  37. struct cpu *cpu = container_of(dev, struct cpu, dev);
  38. ssize_t ret;
  39. long snooze;
  40. ret = sscanf(buf, "%ld", &snooze);
  41. if (ret != 1)
  42. return -EINVAL;
  43. per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
  44. return count;
  45. }
/* Report the current SMT snooze delay (microseconds) for one cpu. */
static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);
/*
 * Parse the "smt-snooze-delay=" command line option and apply the
 * value to every possible cpu.  Always returns 1 so the option is
 * consumed, even when SMT is not available.
 */
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Highest timebase bit index (the timebase is 64 bits wide) */
#define MAX_BIT		63

/* User-requested wait times in ns; 0 means "not set, read from hardware" */
static u64 pw20_wt;
static u64 altivec_idle_wt;

/*
 * Convert a wait time in nanoseconds to a timebase-bit index
 * (log2 of the equivalent number of timebase ticks).
 */
static unsigned int get_idle_ticks_bit(u64 ns)
{
	u64 cycle;

	if (ns >= 10000)
		/* round to the nearest us first to avoid 64-bit overflow
		 * in ns * tb_ticks_per_usec for large values */
		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
	else
		cycle = div_u64(ns * tb_ticks_per_usec, 1000);

	if (!cycle)
		return 0;

	return ilog2(cycle);
}
/* IPI callback: read PWRMGTCR0 on the cpu this runs on into *val. */
static void do_show_pwrmgtcr0(void *val)
{
	u32 *value = val;

	*value = mfspr(SPRN_PWRMGTCR0);
}
  88. static ssize_t show_pw20_state(struct device *dev,
  89. struct device_attribute *attr, char *buf)
  90. {
  91. u32 value;
  92. unsigned int cpu = dev->id;
  93. smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
  94. value &= PWRMGTCR0_PW20_WAIT;
  95. return sprintf(buf, "%u\n", value ? 1 : 0);
  96. }
  97. static void do_store_pw20_state(void *val)
  98. {
  99. u32 *value = val;
  100. u32 pw20_state;
  101. pw20_state = mfspr(SPRN_PWRMGTCR0);
  102. if (*value)
  103. pw20_state |= PWRMGTCR0_PW20_WAIT;
  104. else
  105. pw20_state &= ~PWRMGTCR0_PW20_WAIT;
  106. mtspr(SPRN_PWRMGTCR0, pw20_state);
  107. }
  108. static ssize_t store_pw20_state(struct device *dev,
  109. struct device_attribute *attr,
  110. const char *buf, size_t count)
  111. {
  112. u32 value;
  113. unsigned int cpu = dev->id;
  114. if (kstrtou32(buf, 0, &value))
  115. return -EINVAL;
  116. if (value > 1)
  117. return -EINVAL;
  118. smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
  119. return count;
  120. }
/*
 * Report the PW20 wait time in nanoseconds.  If the user never stored a
 * value, derive it from the entry-count field currently programmed in
 * PWRMGTCR0 on the target cpu.
 */
static ssize_t show_pw20_wait_time(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
			PWRMGTCR0_PW20_ENT_SHIFT;

		/* timebase cycles before entry: 2^(MAX_BIT - value + 1) */
		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert ms to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			/* slow timebase: compute whole us plus ns remainder
			 * separately to keep precision */
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = pw20_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
/*
 * IPI callback: program the PW20 entry count from a timebase-bit index
 * (*val).  The hardware field stores MAX_BIT - bit_index.
 */
static void set_pw20_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 pw20_idle;

	pw20_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic PW20 Core Idle Count */
	/* clear count */
	pw20_idle &= ~PWRMGTCR0_PW20_ENT;

	/* set count */
	pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, pw20_idle);
}
/*
 * Set the PW20 wait time (nanoseconds, non-zero).  The value is cached
 * in pw20_wt and translated to a timebase-bit index programmed on the
 * target cpu.
 */
static ssize_t store_pw20_wait_time(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	pw20_wt = value;

	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
				&entry_bit, 1);

	return count;
}
  178. static ssize_t show_altivec_idle(struct device *dev,
  179. struct device_attribute *attr, char *buf)
  180. {
  181. u32 value;
  182. unsigned int cpu = dev->id;
  183. smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
  184. value &= PWRMGTCR0_AV_IDLE_PD_EN;
  185. return sprintf(buf, "%u\n", value ? 1 : 0);
  186. }
  187. static void do_store_altivec_idle(void *val)
  188. {
  189. u32 *value = val;
  190. u32 altivec_idle;
  191. altivec_idle = mfspr(SPRN_PWRMGTCR0);
  192. if (*value)
  193. altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
  194. else
  195. altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
  196. mtspr(SPRN_PWRMGTCR0, altivec_idle);
  197. }
  198. static ssize_t store_altivec_idle(struct device *dev,
  199. struct device_attribute *attr,
  200. const char *buf, size_t count)
  201. {
  202. u32 value;
  203. unsigned int cpu = dev->id;
  204. if (kstrtou32(buf, 0, &value))
  205. return -EINVAL;
  206. if (value > 1)
  207. return -EINVAL;
  208. smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
  209. return count;
  210. }
/*
 * Report the AltiVec idle wait time in nanoseconds.  If the user never
 * stored a value, derive it from the idle-count field currently
 * programmed in PWRMGTCR0 on the target cpu.
 */
static ssize_t show_altivec_idle_wait_time(struct device *dev,
					   struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!altivec_idle_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
			PWRMGTCR0_AV_IDLE_CNT_SHIFT;

		/* timebase cycles before entry: 2^(MAX_BIT - value + 1) */
		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert ms to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			/* slow timebase: compute whole us plus ns remainder
			 * separately to keep precision */
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = altivec_idle_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
/*
 * IPI callback: program the AltiVec idle count from a timebase-bit
 * index (*val).  The hardware field stores MAX_BIT - bit_index.
 */
static void set_altivec_idle_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic AltiVec Idle Count */
	/* clear count */
	altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;

	/* set count */
	altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}
/*
 * Set the AltiVec idle wait time (nanoseconds, non-zero).  The value is
 * cached in altivec_idle_wt and translated to a timebase-bit index
 * programmed on the target cpu.
 */
static ssize_t store_altivec_idle_wait_time(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	altivec_idle_wt = value;

	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
				&entry_bit, 1);

	return count;
}
/*
 * Enable/Disable interface:
 * 0, disable. 1, enable.
 */
static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);

/*
 * Set wait time interface:(Nanosecond)
 * Example: Base on TBfreq is 41MHZ.
 * 1~48(ns): TB[63]
 * 49~97(ns): TB[62]
 * 98~195(ns): TB[61]
 * 196~390(ns): TB[60]
 * 391~780(ns): TB[59]
 * 781~1560(ns): TB[58]
 * ...
 */
static DEVICE_ATTR(pw20_wait_time, 0600,
			show_pw20_wait_time,
			store_pw20_wait_time);
static DEVICE_ATTR(altivec_idle_wait_time, 0600,
			show_altivec_idle_wait_time,
			store_altivec_idle_wait_time);
#endif /* CONFIG_PPC_FSL_BOOK3E */
/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */
static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	/* Tell the hypervisor layer the PMU is in use before touching it */
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__this_cpu_read(pmcs_enabled))
		return;

	__this_cpu_write(pmcs_enabled, 1);

	/* Platform-specific enable hook, if any */
	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);
/*
 * Generate read_<NAME>/write_<NAME> helpers that access SPR ADDRESS on
 * the cpu they execute on (they are invoked via
 * smp_call_function_single() so a specific cpu's SPR can be accessed
 * from anywhere).  EXTRA is run before the write; the PMC variant uses
 * it to lazily enable the PMCs.
 */
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS);	\
} \
static void write_##NAME(void *val) \
{ \
	EXTRA; \
	mtspr(ADDRESS, *(unsigned long *)val); \
}

/*
 * Generate show_<NAME>/store_<NAME> sysfs accessors on top of the
 * read/write helpers above.  Values are printed and parsed as hex.
 */
#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}

/* PMC SPRs: writes also enable the PMCs via ppc_enable_pmcs() */
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

/* Plain SPRs: no extra action on write */
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

/* For SPRs with hand-written read/write helpers (e.g. dscr below) */
#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
/* Let's define all possible registers, we'll only hook up the ones
 * that are implemented on the current processor
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif

#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
SYSFS_SPRSETUP(tscr, SPRN_TSCR);

/*
  Lets only enable read for phyp resources and
  enable write when needed with a separate function.
  Lets be conservative and default to pseries.
*/
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
/* purr starts read-only; add_write_permission_dev_attr() may open it up */
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);

/*
 * This is the system wide DSCR register default value. Any
 * change to this default value through the sysfs interface
 * will update all per cpu DSCR default values across the
 * system stored in their respective PACA structures.
 */
static unsigned long dscr_default;
/**
 * read_dscr() - Fetch the cpu specific DSCR default
 * @val: Returned cpu specific DSCR default value
 *
 * This function returns the per cpu DSCR default value
 * for any cpu which is contained in its PACA structure.
 */
static void read_dscr(void *val)
{
	*(unsigned long *)val = get_paca()->dscr_default;
}
/**
 * write_dscr() - Update the cpu specific DSCR default
 * @val: New cpu specific DSCR default value to update
 *
 * This function updates the per cpu DSCR default value
 * for any cpu which is contained in its PACA structure.
 */
static void write_dscr(void *val)
{
	get_paca()->dscr_default = *(unsigned long *)val;
	/* Only override the live DSCR of tasks that never set it themselves */
	if (!current->thread.dscr_inherit) {
		current->thread.dscr = *(unsigned long *)val;
		mtspr(SPRN_DSCR, *(unsigned long *)val);
	}
}
SYSFS_SPRSETUP_SHOW_STORE(dscr);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);

/* Make a read-only attribute writable (used for purr on bare metal) */
static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}
  429. /**
  430. * show_dscr_default() - Fetch the system wide DSCR default
  431. * @dev: Device structure
  432. * @attr: Device attribute structure
  433. * @buf: Interface buffer
  434. *
  435. * This function returns the system wide DSCR default value.
  436. */
  437. static ssize_t show_dscr_default(struct device *dev,
  438. struct device_attribute *attr, char *buf)
  439. {
  440. return sprintf(buf, "%lx\n", dscr_default);
  441. }
  442. /**
  443. * store_dscr_default() - Update the system wide DSCR default
  444. * @dev: Device structure
  445. * @attr: Device attribute structure
  446. * @buf: Interface buffer
  447. * @count: Size of the update
  448. *
  449. * This function updates the system wide DSCR default value.
  450. */
  451. static ssize_t __used store_dscr_default(struct device *dev,
  452. struct device_attribute *attr, const char *buf,
  453. size_t count)
  454. {
  455. unsigned long val;
  456. int ret = 0;
  457. ret = sscanf(buf, "%lx", &val);
  458. if (ret != 1)
  459. return -EINVAL;
  460. dscr_default = val;
  461. on_each_cpu(write_dscr, &val, 1);
  462. return count;
  463. }
  464. static DEVICE_ATTR(dscr_default, 0600,
  465. show_dscr_default, store_dscr_default);
  466. static void sysfs_create_dscr_default(void)
  467. {
  468. int err = 0;
  469. if (cpu_has_feature(CPU_FTR_DSCR))
  470. err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
  471. }
/*
 * Capture the boot-time DSCR value as the system wide default and seed
 * every cpu's PACA with it.  Called early in boot (__init).
 */
void __init record_spr_defaults(void)
{
	int cpu;

	if (cpu_has_feature(CPU_FTR_DSCR)) {
		dscr_default = mfspr(SPRN_DSCR);
		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
			paca[cpu].dscr_default = dscr_default;
	}
}
#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
/* PA Semi PA6T performance counters (numbered from PMC0) */
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);

#ifdef CONFIG_DEBUG_KERNEL
/* Debug-only access to PA6T implementation registers */
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
SYSFS_SPRSETUP(hid5, SPRN_HID5);
SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */
#ifdef HAS_PPC_PMC_IBM
/* Common MMCR attributes for IBM-style PMUs */
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
/* Common MMCR attributes for G4-style PMUs (adds mmcr2) */
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */
/*
 * Classic PMC counter attributes; only the first cur_cpu_spec->num_pmcs
 * entries are registered per cpu.
 */
static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
#ifdef HAS_PPC_PMC_PA6T
/* Full PA6T attribute set (counters start at pmc0 on PA Semi) */
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
/*
 * cpuhp "online" callback: create the per-cpu sysfs attributes for a
 * cpu coming online.  Must mirror unregister_cpu_online() exactly.
 */
static int register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* For cpus present at boot a reference was already grabbed in register_cpu() */
	if (!s->of_node)
		s->of_node = of_get_cpu_node(cpu, NULL);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff: pick the attribute tables for this PMU flavour */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	/* Only register as many counters as this cpu actually has */
	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR)) {
		/* purr is writable only on bare metal (no hypervisor) */
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
	}

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* PW20/AltiVec idle controls exist on e6500 only */
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_create_file(s, &dev_attr_pw20_state);
		device_create_file(s, &dev_attr_pw20_wait_time);

		device_create_file(s, &dev_attr_altivec_idle);
		device_create_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_online(cpu);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpuhp "offline" callback: remove everything register_cpu_online()
 * created, and drop the of_node reference taken there.
 */
static int unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* Only hotpluggable cpus should ever go offline */
	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff: same table selection as register_cpu_online() */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_remove_file(s, &dev_attr_pw20_state);
		device_remove_file(s, &dev_attr_pw20_wait_time);

		device_remove_file(s, &dev_attr_altivec_idle);
		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_offline(cpu);
	of_node_put(s->of_node);
	s->of_node = NULL;
	return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
#define unregister_cpu_online NULL
#endif
  741. #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
  742. ssize_t arch_cpu_probe(const char *buf, size_t count)
  743. {
  744. if (ppc_md.cpu_probe)
  745. return ppc_md.cpu_probe(buf, count);
  746. return -EINVAL;
  747. }
  748. ssize_t arch_cpu_release(const char *buf, size_t count)
  749. {
  750. if (ppc_md.cpu_release)
  751. return ppc_md.cpu_release(buf, count);
  752. return -EINVAL;
  753. }
  754. #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
  755. static DEFINE_MUTEX(cpu_mutex);
  756. int cpu_add_dev_attr(struct device_attribute *attr)
  757. {
  758. int cpu;
  759. mutex_lock(&cpu_mutex);
  760. for_each_possible_cpu(cpu) {
  761. device_create_file(get_cpu_device(cpu), attr);
  762. }
  763. mutex_unlock(&cpu_mutex);
  764. return 0;
  765. }
  766. EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
  767. int cpu_add_dev_attr_group(struct attribute_group *attrs)
  768. {
  769. int cpu;
  770. struct device *dev;
  771. int ret;
  772. mutex_lock(&cpu_mutex);
  773. for_each_possible_cpu(cpu) {
  774. dev = get_cpu_device(cpu);
  775. ret = sysfs_create_group(&dev->kobj, attrs);
  776. WARN_ON(ret != 0);
  777. }
  778. mutex_unlock(&cpu_mutex);
  779. return 0;
  780. }
  781. EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
  782. void cpu_remove_dev_attr(struct device_attribute *attr)
  783. {
  784. int cpu;
  785. mutex_lock(&cpu_mutex);
  786. for_each_possible_cpu(cpu) {
  787. device_remove_file(get_cpu_device(cpu), attr);
  788. }
  789. mutex_unlock(&cpu_mutex);
  790. }
  791. EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
  792. void cpu_remove_dev_attr_group(struct attribute_group *attrs)
  793. {
  794. int cpu;
  795. struct device *dev;
  796. mutex_lock(&cpu_mutex);
  797. for_each_possible_cpu(cpu) {
  798. dev = get_cpu_device(cpu);
  799. sysfs_remove_group(&dev->kobj, attrs);
  800. }
  801. mutex_unlock(&cpu_mutex);
  802. }
  803. EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */

#ifdef CONFIG_NUMA
/* Register a sysfs node device for every NUMA node id. */
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}
  812. int sysfs_add_device_to_node(struct device *dev, int nid)
  813. {
  814. struct node *node = node_devices[nid];
  815. return sysfs_create_link(&node->dev.kobj, &dev->kobj,
  816. kobject_name(&dev->kobj));
  817. }
  818. EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
  819. void sysfs_remove_device_from_node(struct device *dev, int nid)
  820. {
  821. struct node *node = node_devices[nid];
  822. sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
  823. }
  824. EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
#else
/* Non-NUMA build: nothing to register */
static void register_nodes(void)
{
	return;
}
#endif
  831. /* Only valid if CPU is present. */
  832. static ssize_t show_physical_id(struct device *dev,
  833. struct device_attribute *attr, char *buf)
  834. {
  835. struct cpu *cpu = container_of(dev, struct cpu, dev);
  836. return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
  837. }
  838. static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
/*
 * Register NUMA nodes and cpu devices with the driver core, then hook
 * up the cpuhp online/offline callbacks that manage the per-cpu
 * attributes.
 */
static int __init topology_init(void)
{
	int cpu, r;

	register_nodes();

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}
	}
	r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
			      register_cpu_online, unregister_cpu_online);
	WARN_ON(r < 0);
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);