
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/acpi.h>

#include "internal.h"

#define _COMPONENT		ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");

#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/module/acpi/parameters/debug_layer
 * /sys/module/acpi/parameters/debug_level
 * /sys/module/acpi/parameters/trace_method_name
 * /sys/module/acpi/parameters/trace_state
 * /sys/module/acpi/parameters/trace_debug_layer
 * /sys/module/acpi/parameters/trace_debug_level
 */

struct acpi_dlayer {
        const char *name;
        unsigned long value;
};

struct acpi_dlevel {
        const char *name;
        unsigned long value;
};

#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }

static const struct acpi_dlayer acpi_debug_layers[] = {
        ACPI_DEBUG_INIT(ACPI_UTILITIES),
        ACPI_DEBUG_INIT(ACPI_HARDWARE),
        ACPI_DEBUG_INIT(ACPI_EVENTS),
        ACPI_DEBUG_INIT(ACPI_TABLES),
        ACPI_DEBUG_INIT(ACPI_NAMESPACE),
        ACPI_DEBUG_INIT(ACPI_PARSER),
        ACPI_DEBUG_INIT(ACPI_DISPATCHER),
        ACPI_DEBUG_INIT(ACPI_EXECUTER),
        ACPI_DEBUG_INIT(ACPI_RESOURCES),
        ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
        ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
        ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
        ACPI_DEBUG_INIT(ACPI_COMPILER),
        ACPI_DEBUG_INIT(ACPI_TOOLS),

        ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};

static const struct acpi_dlevel acpi_debug_levels[] = {
        ACPI_DEBUG_INIT(ACPI_LV_INIT),
        ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
        ACPI_DEBUG_INIT(ACPI_LV_INFO),
        ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
        ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

        ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
        ACPI_DEBUG_INIT(ACPI_LV_PARSE),
        ACPI_DEBUG_INIT(ACPI_LV_LOAD),
        ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
        ACPI_DEBUG_INIT(ACPI_LV_EXEC),
        ACPI_DEBUG_INIT(ACPI_LV_NAMES),
        ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
        ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
        ACPI_DEBUG_INIT(ACPI_LV_TABLES),
        ACPI_DEBUG_INIT(ACPI_LV_VALUES),
        ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
        ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
        ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
        ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

        ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
        ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
        ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

        ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
        ACPI_DEBUG_INIT(ACPI_LV_THREADS),
        ACPI_DEBUG_INIT(ACPI_LV_IO),
        ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

        ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
        ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
        ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
        ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};

static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
        int result = 0;
        int i;

        result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

        for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
                result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
                                  acpi_debug_layers[i].name,
                                  acpi_debug_layers[i].value,
                                  (acpi_dbg_layer & acpi_debug_layers[i].value)
                                  ? '*' : ' ');
        }
        result +=
            sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
                    ACPI_ALL_DRIVERS,
                    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
                    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
                    == 0 ? ' ' : '-');
        result +=
            sprintf(buffer + result,
                    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
                    acpi_dbg_layer);

        return result;
}

static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
        int result = 0;
        int i;

        result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

        for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
                result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
                                  acpi_debug_levels[i].name,
                                  acpi_debug_levels[i].value,
                                  (acpi_dbg_level & acpi_debug_levels[i].value)
                                  ? '*' : ' ');
        }
        result +=
            sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
                    acpi_dbg_level);

        return result;
}

static const struct kernel_param_ops param_ops_debug_layer = {
        .set = param_set_uint,
        .get = param_get_debug_layer,
};

static const struct kernel_param_ops param_ops_debug_level = {
        .set = param_set_uint,
        .get = param_get_debug_level,
};

module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
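
/*
 * Example usage (the values are illustrative; any bitwise OR of the
 * layer/level constants listed above is valid):
 *
 *   # cat /sys/module/acpi/parameters/debug_layer
 *   # echo 0x00000004 > /sys/module/acpi/parameters/debug_level
 */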

static char trace_method_name[1024];

static int param_set_trace_method_name(const char *val,
                                       const struct kernel_param *kp)
{
        u32 saved_flags = 0;
        bool is_abs_path = true;

        if (*val != '\\')
                is_abs_path = false;

        /* Leave room for the trailing NUL and, if needed, a leading '\' */
        if ((is_abs_path && strlen(val) > 1023) ||
            (!is_abs_path && strlen(val) > 1022)) {
                pr_err("%s: string parameter too long\n", kp->name);
                return -ENOSPC;
        }

        /*
         * It's not safe to update acpi_gbl_trace_method_name without
         * having the tracer stopped, so we save the original tracer
         * state and disable it.
         */
        saved_flags = acpi_gbl_trace_flags;
        (void)acpi_debug_trace(NULL,
                               acpi_gbl_trace_dbg_level,
                               acpi_gbl_trace_dbg_layer,
                               0);

        /* This is a hack. We can't kmalloc in early boot. */
        if (is_abs_path)
                strcpy(trace_method_name, val);
        else {
                trace_method_name[0] = '\\';
                strcpy(trace_method_name + 1, val);
        }

        /* Restore the original tracer state */
        (void)acpi_debug_trace(trace_method_name,
                               acpi_gbl_trace_dbg_level,
                               acpi_gbl_trace_dbg_layer,
                               saved_flags);

        return 0;
}

static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
        return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
}

static const struct kernel_param_ops param_ops_trace_method = {
        .set = param_set_trace_method_name,
        .get = param_get_trace_method_name,
};

static const struct kernel_param_ops param_ops_trace_attrib = {
        .set = param_set_uint,
        .get = param_get_uint,
};

module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);

static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
        acpi_status status;
        const char *method = trace_method_name;
        u32 flags = 0;

/* The "xxx-once" keywords must be compared before the plain "xxx" keywords */
#define acpi_compare_param(val, key)	\
        strncmp((val), (key), sizeof(key) - 1)

        if (!acpi_compare_param(val, "enable")) {
                method = NULL;
                flags = ACPI_TRACE_ENABLED;
        } else if (!acpi_compare_param(val, "disable"))
                method = NULL;
        else if (!acpi_compare_param(val, "method-once"))
                flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
        else if (!acpi_compare_param(val, "method"))
                flags = ACPI_TRACE_ENABLED;
        else if (!acpi_compare_param(val, "opcode-once"))
                flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
        else if (!acpi_compare_param(val, "opcode"))
                flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
        else
                return -EINVAL;

        status = acpi_debug_trace(method,
                                  acpi_gbl_trace_dbg_level,
                                  acpi_gbl_trace_dbg_layer,
                                  flags);
        if (ACPI_FAILURE(status))
                return -EBUSY;

        return 0;
}

static int param_get_trace_state(char *buffer, struct kernel_param *kp)
{
        if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
                return sprintf(buffer, "disable");
        else {
                if (acpi_gbl_trace_method_name) {
                        if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
                                return sprintf(buffer, "method-once");
                        else
                                return sprintf(buffer, "method");
                } else
                        return sprintf(buffer, "enable");
        }
        return 0;
}

module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
                  NULL, 0644);
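
/*
 * Example of tracing a single AML method (the method path below is
 * illustrative; any namespace path accepted by trace_method_name works):
 *
 *   # echo "\_SB.PCI0._INI" > /sys/module/acpi/parameters/trace_method_name
 *   # echo method-once > /sys/module/acpi/parameters/trace_state
 *
 * Valid trace_state keywords, as parsed above: enable, disable, method,
 * method-once, opcode and opcode-once.
 */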

#endif /* CONFIG_ACPI_DEBUG */

/* /sys/module/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
                   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
                 "To enable/disable the ACPI Debug Object output.");
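
/*
 * For example, the Debug Object output can be enabled at run time with:
 *   # echo 1 > /sys/module/acpi/parameters/aml_debug_output
 */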

/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
        int result;

        result = sprintf(buffer, "%x", ACPI_CA_VERSION);

        return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);

/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/dynamic/
 */
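/*
 * Each installed table is exposed as a read-only binary attribute, so it
 * can be dumped with e.g. (the table name is illustrative):
 *   # cat /sys/firmware/acpi/tables/DSDT > dsdt.dat
 * Tables installed at run time additionally show up under tables/dynamic/.
 */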

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4 /* including trailing 0 */

struct acpi_table_attr {
        struct bin_attribute attr;
        char name[ACPI_NAME_SIZE];
        int instance;
        char filename[ACPI_NAME_SIZE + ACPI_INST_SIZE];
        struct list_head node;
};

static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t offset, size_t count)
{
        struct acpi_table_attr *table_attr =
            container_of(bin_attr, struct acpi_table_attr, attr);
        struct acpi_table_header *table_header = NULL;
        acpi_status status;
        ssize_t rc;

        status = acpi_get_table(table_attr->name, table_attr->instance,
                                &table_header);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        rc = memory_read_from_buffer(buf, count, &offset, table_header,
                                     table_header->length);
        acpi_put_table(table_header);
        return rc;
}

static int acpi_table_attr_init(struct kobject *tables_obj,
                                struct acpi_table_attr *table_attr,
                                struct acpi_table_header *table_header)
{
        struct acpi_table_header *header = NULL;
        struct acpi_table_attr *attr = NULL;
        char instance_str[ACPI_INST_SIZE];

        sysfs_attr_init(&table_attr->attr.attr);
        ACPI_MOVE_NAME(table_attr->name, table_header->signature);

        list_for_each_entry(attr, &acpi_table_attr_list, node) {
                if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
                        if (table_attr->instance < attr->instance)
                                table_attr->instance = attr->instance;
        }
        table_attr->instance++;
        if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
                pr_warn("%4.4s: too many table instances\n",
                        table_attr->name);
                return -ERANGE;
        }

        ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
        table_attr->filename[ACPI_NAME_SIZE] = '\0';
        if (table_attr->instance > 1 || (table_attr->instance == 1 &&
                                         !acpi_get_table
                                         (table_header->signature, 2, &header))) {
                snprintf(instance_str, sizeof(instance_str), "%u",
                         table_attr->instance);
                strcat(table_attr->filename, instance_str);
        }

        table_attr->attr.size = table_header->length;
        table_attr->attr.read = acpi_table_show;
        table_attr->attr.attr.name = table_attr->filename;
        table_attr->attr.attr.mode = 0400;

        return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}

acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
        struct acpi_table_attr *table_attr;

        switch (event) {
        case ACPI_TABLE_EVENT_INSTALL:
                table_attr =
                    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
                if (!table_attr)
                        return AE_NO_MEMORY;

                if (acpi_table_attr_init(dynamic_tables_kobj,
                                         table_attr, table)) {
                        kfree(table_attr);
                        return AE_ERROR;
                }
                list_add_tail(&table_attr->node, &acpi_table_attr_list);
                break;
        case ACPI_TABLE_EVENT_LOAD:
        case ACPI_TABLE_EVENT_UNLOAD:
        case ACPI_TABLE_EVENT_UNINSTALL:
                /*
                 * we do not need to do anything right now
                 * because the table is not deleted from the
                 * global table list when unloading it.
                 */
                break;
        default:
                return AE_BAD_PARAMETER;
        }
        return AE_OK;
}

static int acpi_tables_sysfs_init(void)
{
        struct acpi_table_attr *table_attr;
        struct acpi_table_header *table_header = NULL;
        int table_index;
        acpi_status status;
        int ret;

        tables_kobj = kobject_create_and_add("tables", acpi_kobj);
        if (!tables_kobj)
                goto err;

        dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
        if (!dynamic_tables_kobj)
                goto err_dynamic_tables;

        for (table_index = 0;; table_index++) {
                status = acpi_get_table_by_index(table_index, &table_header);

                if (status == AE_BAD_PARAMETER)
                        break;

                if (ACPI_FAILURE(status))
                        continue;

                table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
                if (!table_attr)
                        return -ENOMEM;

                ret = acpi_table_attr_init(tables_kobj,
                                           table_attr, table_header);
                if (ret) {
                        kfree(table_attr);
                        return ret;
                }
                list_add_tail(&table_attr->node, &acpi_table_attr_list);
        }

        kobject_uevent(tables_kobj, KOBJ_ADD);
        kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

        return 0;
err_dynamic_tables:
        kobject_put(tables_kobj);
err:
        return -ENOMEM;
}

/*
 * Detailed ACPI IRQ counters:
 * /sys/firmware/acpi/interrupts/
 */
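/*
 * One counter file is created per GPE and fixed event (e.g. gpe00,
 * ff_pwr_btn) plus the aggregate gpe_all, sci, sci_not and error files;
 * reading a file prints the count followed by the event status, e.g.:
 *   # cat /sys/firmware/acpi/interrupts/sci
 */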

u32 acpi_irq_handled;
u32 acpi_irq_not_handled;

#define COUNT_GPE 0
#define COUNT_SCI 1		/* acpi_irq_handled */
#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
#define COUNT_ERROR 3		/* other */
#define NUM_COUNTERS_EXTRA 4

struct event_counter {
        u32 count;
        u32 flags;
};

static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;

static struct attribute_group interrupt_stats_attr_group = {
        .name = "interrupts",
};

static struct kobj_attribute *counter_attrs;

static void delete_gpe_attr_array(void)
{
        struct event_counter *tmp = all_counters;

        all_counters = NULL;
        kfree(tmp);

        if (counter_attrs) {
                int i;

                for (i = 0; i < num_gpes; i++)
                        kfree(counter_attrs[i].attr.name);

                kfree(counter_attrs);
        }
        kfree(all_attrs);

        return;
}

static void gpe_count(u32 gpe_number)
{
        acpi_gpe_count++;

        if (!all_counters)
                return;

        if (gpe_number < num_gpes)
                all_counters[gpe_number].count++;
        else
                all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
                             COUNT_ERROR].count++;

        return;
}

static void fixed_event_count(u32 event_number)
{
        if (!all_counters)
                return;

        if (event_number < ACPI_NUM_FIXED_EVENTS)
                all_counters[num_gpes + event_number].count++;
        else
                all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
                             COUNT_ERROR].count++;

        return;
}

static void acpi_global_event_handler(u32 event_type, acpi_handle device,
                                      u32 event_number, void *context)
{
        if (event_type == ACPI_EVENT_TYPE_GPE)
                gpe_count(event_number);

        if (event_type == ACPI_EVENT_TYPE_FIXED)
                fixed_event_count(event_number);
}

static int get_status(u32 index, acpi_event_status *status,
                      acpi_handle *handle)
{
        int result;

        if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
                return -EINVAL;

        if (index < num_gpes) {
                result = acpi_get_gpe_device(index, handle);
                if (result) {
                        ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
                                        "Invalid GPE 0x%x", index));
                        return result;
                }
                result = acpi_get_gpe_status(*handle, index, status);
        } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
                result = acpi_get_event_status(index - num_gpes, status);

        return result;
}

static ssize_t counter_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        int index = attr - counter_attrs;
        int size;
        acpi_handle handle;
        acpi_event_status status;
        int result = 0;

        all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
            acpi_irq_handled;
        all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
            acpi_irq_not_handled;
        all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
            acpi_gpe_count;
        size = sprintf(buf, "%8u", all_counters[index].count);

        /* "gpe_all" or "sci" */
        if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
                goto end;

        result = get_status(index, &status, &handle);
        if (result)
                goto end;

        if (status & ACPI_EVENT_FLAG_ENABLE_SET)
                size += sprintf(buf + size, "  EN");
        else
                size += sprintf(buf + size, "    ");
        if (status & ACPI_EVENT_FLAG_STATUS_SET)
                size += sprintf(buf + size, " STS");
        else
                size += sprintf(buf + size, "    ");

        if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
                size += sprintf(buf + size, " invalid     ");
        else if (status & ACPI_EVENT_FLAG_ENABLED)
                size += sprintf(buf + size, " enabled     ");
        else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
                size += sprintf(buf + size, " wake_enabled");
        else
                size += sprintf(buf + size, " disabled    ");
        if (status & ACPI_EVENT_FLAG_MASKED)
                size += sprintf(buf + size, " masked  ");
        else
                size += sprintf(buf + size, " unmasked");

end:
        size += sprintf(buf + size, "\n");
        return result ? result : size;
}

/*
 * counter_set() sets the specified counter.
 * Setting the total "sci" file to any value clears all counters.
 * Enable/disable/clear a GPE/fixed event in user space.
 */
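/*
 * Example interactions (gpe00 is illustrative):
 *   # echo 0 > /sys/firmware/acpi/interrupts/sci      (clears all counters)
 *   # echo disable > /sys/firmware/acpi/interrupts/gpe00
 *   # echo clear   > /sys/firmware/acpi/interrupts/gpe00
 */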
static ssize_t counter_set(struct kobject *kobj,
                           struct kobj_attribute *attr, const char *buf,
                           size_t size)
{
        int index = attr - counter_attrs;
        acpi_event_status status;
        acpi_handle handle;
        int result = 0;
        unsigned long tmp;

        if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
                int i;

                for (i = 0; i < num_counters; ++i)
                        all_counters[i].count = 0;
                acpi_gpe_count = 0;
                acpi_irq_handled = 0;
                acpi_irq_not_handled = 0;
                goto end;
        }

        /* show the event status for both GPEs and Fixed Events */
        result = get_status(index, &status, &handle);
        if (result)
                goto end;

        if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
                printk(KERN_WARNING PREFIX
                       "Can not change Invalid GPE/Fixed Event status\n");
                return -EINVAL;
        }

        if (index < num_gpes) {
                if (!strcmp(buf, "disable\n") &&
                    (status & ACPI_EVENT_FLAG_ENABLED))
                        result = acpi_disable_gpe(handle, index);
                else if (!strcmp(buf, "enable\n") &&
                         !(status & ACPI_EVENT_FLAG_ENABLED))
                        result = acpi_enable_gpe(handle, index);
                else if (!strcmp(buf, "clear\n") &&
                         (status & ACPI_EVENT_FLAG_STATUS_SET))
                        result = acpi_clear_gpe(handle, index);
                else if (!strcmp(buf, "mask\n"))
                        result = acpi_mask_gpe(handle, index, TRUE);
                else if (!strcmp(buf, "unmask\n"))
                        result = acpi_mask_gpe(handle, index, FALSE);
                else if (!kstrtoul(buf, 0, &tmp))
                        all_counters[index].count = tmp;
                else
                        result = -EINVAL;
        } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
                int event = index - num_gpes;

                if (!strcmp(buf, "disable\n") &&
                    (status & ACPI_EVENT_FLAG_ENABLE_SET))
                        result = acpi_disable_event(event, ACPI_NOT_ISR);
                else if (!strcmp(buf, "enable\n") &&
                         !(status & ACPI_EVENT_FLAG_ENABLE_SET))
                        result = acpi_enable_event(event, ACPI_NOT_ISR);
                else if (!strcmp(buf, "clear\n") &&
                         (status & ACPI_EVENT_FLAG_STATUS_SET))
                        result = acpi_clear_event(event);
                else if (!kstrtoul(buf, 0, &tmp))
                        all_counters[index].count = tmp;
                else
                        result = -EINVAL;
        } else
                all_counters[index].count = strtoul(buf, NULL, 0);

        if (ACPI_FAILURE(result))
                result = -EINVAL;
end:
        return result ? result : size;
}

/*
 * A Quirk Mechanism for GPE Flooding Prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 * flooding typically cannot be detected and automatically prevented by
 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
 * the AML tables. This normally indicates a feature gap in Linux, thus
 * instead of providing endless quirk tables, we provide a boot parameter
 * for those who want this quirk. For example, if the users want to prevent
 * the GPE flooding for GPE 00, they need to specify the following boot
 * parameter:
 *   acpi_mask_gpe=0x00
 * The masking status can be modified by the following runtime controlling
 * interface:
 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */

/*
 * Currently, the GPE flooding prevention only supports masking GPEs
 * numbered from 0x00 to 0x7f.
 */
#define ACPI_MASKABLE_GPE_MAX	0x80

static u64 __initdata acpi_masked_gpes;

static int __init acpi_gpe_set_masked_gpes(char *val)
{
        u8 gpe;

        if (kstrtou8(val, 0, &gpe) || gpe >= ACPI_MASKABLE_GPE_MAX)
                return -EINVAL;
        acpi_masked_gpes |= ((u64)1 << gpe);

        return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);

void __init acpi_gpe_apply_masked_gpes(void)
{
        acpi_handle handle;
        acpi_status status;
        u8 gpe;

        for (gpe = 0;
             gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
             gpe++) {
                if (acpi_masked_gpes & ((u64)1 << gpe)) {
                        status = acpi_get_gpe_device(gpe, &handle);
                        if (ACPI_SUCCESS(status)) {
                                pr_info("Masking GPE 0x%x.\n", gpe);
                                (void)acpi_mask_gpe(handle, gpe, TRUE);
                        }
                }
        }
}

void acpi_irq_stats_init(void)
{
        acpi_status status;
        int i;

        if (all_counters)
                return;

        num_gpes = acpi_current_gpe_count;
        num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

        all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
                            GFP_KERNEL);
        if (all_attrs == NULL)
                return;

        all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
                               GFP_KERNEL);
        if (all_counters == NULL)
                goto fail;

        status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
        if (ACPI_FAILURE(status))
                goto fail;

        counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
                                GFP_KERNEL);
        if (counter_attrs == NULL)
                goto fail;

        for (i = 0; i < num_counters; ++i) {
                char buffer[12];
                char *name;

                if (i < num_gpes)
                        sprintf(buffer, "gpe%02X", i);
                else if (i == num_gpes + ACPI_EVENT_PMTIMER)
                        sprintf(buffer, "ff_pmtimer");
                else if (i == num_gpes + ACPI_EVENT_GLOBAL)
                        sprintf(buffer, "ff_gbl_lock");
                else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
                        sprintf(buffer, "ff_pwr_btn");
                else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
                        sprintf(buffer, "ff_slp_btn");
                else if (i == num_gpes + ACPI_EVENT_RTC)
                        sprintf(buffer, "ff_rt_clk");
                else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
                        sprintf(buffer, "gpe_all");
                else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
                        sprintf(buffer, "sci");
                else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
                        sprintf(buffer, "sci_not");
                else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
                        sprintf(buffer, "error");
                else
                        sprintf(buffer, "bug%02X", i);

                name = kstrdup(buffer, GFP_KERNEL);
                if (name == NULL)
                        goto fail;

                sysfs_attr_init(&counter_attrs[i].attr);
                counter_attrs[i].attr.name = name;
                counter_attrs[i].attr.mode = 0644;
                counter_attrs[i].show = counter_show;
                counter_attrs[i].store = counter_set;

                all_attrs[i] = &counter_attrs[i].attr;
        }

        interrupt_stats_attr_group.attrs = all_attrs;
        if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
                return;

fail:
        delete_gpe_attr_array();
        return;
}

static void __exit interrupt_stats_exit(void)
{
        sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

        delete_gpe_attr_array();

        return;
}

static ssize_t
acpi_show_profile(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
        return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct device_attribute pm_profile_attr =
        __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
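
/*
 * pm_profile is attached to acpi_kobj in acpi_sysfs_init() below, so it is
 * visible as /sys/firmware/acpi/pm_profile and reports the FADT
 * preferred_profile (Preferred_PM_Profile) value.
 */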

static ssize_t hotplug_enabled_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

        return sprintf(buf, "%d\n", hotplug->enabled);
}

static ssize_t hotplug_enabled_store(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     const char *buf, size_t size)
{
        struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
        unsigned int val;

        if (kstrtouint(buf, 10, &val) || val > 1)
                return -EINVAL;

        acpi_scan_hotplug_enabled(hotplug, val);
        return size;
}

static struct kobj_attribute hotplug_enabled_attr =
        __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
               hotplug_enabled_store);

static struct attribute *hotplug_profile_attrs[] = {
        &hotplug_enabled_attr.attr,
        NULL
};

static struct kobj_type acpi_hotplug_profile_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = hotplug_profile_attrs,
};
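
/*
 * Each registered hotplug profile shows up as
 * /sys/firmware/acpi/hotplug/<name>/enabled; writing 0 or 1 updates
 * hotplug->enabled via acpi_scan_hotplug_enabled().
 */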

void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
                                    const char *name)
{
        int error;

        if (!hotplug_kobj)
                goto err_out;

        error = kobject_init_and_add(&hotplug->kobj,
                &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
        if (error)
                goto err_out;

        kobject_uevent(&hotplug->kobj, KOBJ_ADD);
        return;

err_out:
        pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
}

static ssize_t force_remove_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 0);
}

static ssize_t force_remove_store(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  const char *buf, size_t size)
{
        bool val;
        int ret;

        ret = strtobool(buf, &val);
        if (ret < 0)
                return ret;

        if (val) {
                pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
                return -EINVAL;
        }
        return size;
}

static const struct kobj_attribute force_remove_attr =
        __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
               force_remove_store);

int __init acpi_sysfs_init(void)
{
        int result;

        result = acpi_tables_sysfs_init();
        if (result)
                return result;

        hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
        if (!hotplug_kobj)
                return -ENOMEM;

        result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
        if (result)
                return result;

        result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
        return result;
}