ec.c

/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *         2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *         2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *         2004       Luming Yu <luming.yu@intel.com>
 *         2001, 2002 Andy Grover <andrew.grover@intel.com>
 *         2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI : EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         kind of EC firmware has implemented an event queue and will
 *         return 0x00 to indicate "no outstanding event".
 * QUERY:  After seeing the query request (QR_EC) written to the command
 *         register (EC_CMD) by the host and having prepared the responding
 *         event value in the data register (EC_DATA), the target can safely
 *         clear SCI_EVT because the target can confirm that the current
 *         event is being handled by the host. The host then should check
 *         SCI_EVT right after reading the event response from the data
 *         register (EC_DATA).
 * EVENT:  After seeing the event response read from the data register
 *         (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *         target requires time to notice the change in the data register
 *         (EC_DATA), the host may be required to wait additional guarding
 *         time before checking the SCI_EVT again. Such guarding may not be
 *         necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02

/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};
#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us between EC accesses when polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
					 * OpReg are installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
					 * current command processing */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
/*
 * If the number of false interrupts per transaction exceeds this threshold,
 * the driver assumes that a GPE storm has occurred and disables the GPE for
 * the current transaction.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Number of false GPEs per transaction after which a GPE storm is assumed");
struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);

struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */

/* --------------------------------------------------------------------------
 * Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)

/* --------------------------------------------------------------------------
 * Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 * EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}

#ifdef DEBUG
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 * GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger the GPE, so
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}
static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}

static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear the GPE in any context.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands would then be sent without the GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}

/* --------------------------------------------------------------------------
 * Transaction Management
 * -------------------------------------------------------------------------- */
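
/*
 * The reference count tracks how many users currently need the EC GPE to be
 * enabled: the started driver itself holds one reference, and every
 * in-flight command or query holds another. The GPE is enabled on the 0->1
 * transition and disabled again on the 1->0 transition; acpi_ec_stop()
 * waits until only the driver reference remains (see acpi_ec_flushed())
 * before clearing the STARTED/STOPPED flags.
 */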
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}

static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
{
	if (!test_bit(flag, &ec->flags)) {
		acpi_ec_disable_gpe(ec, false);
		ec_dbg_drv("Polling enabled");
		set_bit(flag, &ec->flags);
	}
}

static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
{
	if (test_bit(flag, &ec->flags)) {
		clear_bit(flag, &ec->flags);
		acpi_ec_enable_gpe(ec, false);
		ec_dbg_drv("Polling disabled");
	}
}
/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      a flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count. If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
	}
}
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		return false;

	/*
	 * Postpone the query submission to allow the firmware to proceed;
	 * SCI_EVT should not be checked before the firmware has had a
	 * chance to reflag it.
	 */
	return true;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
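
/*
 * advance_transaction() is the single-step function of the transaction
 * state machine. It is invoked both from the GPE handler and from the
 * polling path (with ec->lock held): it clears the GPE status, samples
 * EC_SC, and then either writes the next command/data byte (IBF clear),
 * reads the next response byte (OBF set), or accounts the invocation as a
 * spurious IRQ for storm detection. An SCI_EVT indication seen here also
 * schedules the query work item.
 */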
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure that a hardware STS 0->1 change after this clearing can
	 * always trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * If another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, do not treat this as a false IRQ;
	 * otherwise an unhandled IRQ would be counted as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}
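
/*
 * ec_guard() enforces the register access guard interval (ec_polling_guard
 * microseconds since the last EC register access, tracked via
 * ec->timestamp). In busy-polling mode it spins with udelay(); otherwise it
 * sleeps on ec->wait until the current transaction completes or the guard
 * window expires. It returns 0 once the transaction has completed and
 * -ETIME when the caller should advance the transaction by polling.
 */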
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec_polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	do {
		if (ec_busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 *
			 * For the "event" SCI_EVT clearing timing, perform
			 * the guarding before re-checking SCI_EVT. For the
			 * other timings, such guarding is not needed,
			 * matching the old behaviour.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
		/* Guard the register accesses for the polling modes */
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}
	status = acpi_ec_transaction_unlocked(ec, t);
	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}
static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);
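
/*
 * Illustrative use of the exported helpers from another driver (a sketch
 * only; 0x95 is a made-up EC address space offset, real offsets are
 * firmware specific and must come from the platform's DSDT):
 *
 *	u8 val;
 *
 *	if (!ec_read(0x95, &val))
 *		pr_info("EC reg 0x95 = 0x%02x\n", val);
 *	if (ec_write(0x95, val | 0x01))
 *		pr_warn("EC write failed\n");
 *
 * Both helpers operate on first_ec and return -ENODEV before it is probed.
 */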
/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}

	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		}
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	/* Allow transactions to be carried out again */
	acpi_ec_start(ec, true);

	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

void acpi_ec_unblock_transactions_early(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}
/* --------------------------------------------------------------------------
 * Event Management
 * -------------------------------------------------------------------------- */

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
	if (handler)
		kref_get(&handler->kref);
	return handler;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
		kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static void acpi_ec_run(void *cxt)
{
	struct acpi_ec_query_handler *handler = cxt;

	if (!handler)
		return;
	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_put_query_handler(handler);
}
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	acpi_status status;
	struct acpi_ec_query_handler *handler;
	struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
				.wdata = NULL, .rdata = &value,
				.wlen = 0, .rlen = 1};

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &t);
	if (result)
		return result;
	if (data)
		*data = value;
	if (!value)
		return -ENODATA;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			/* have custom handler for this bit */
			handler = acpi_ec_get_query_handler(handler);
			ec_dbg_evt("Query(0x%02x) scheduled",
				   handler->query_bit);
			status = acpi_os_execute((handler->func) ?
				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
				acpi_ec_run, handler);
			if (ACPI_FAILURE(status))
				result = -EBUSY;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	return result;
}
static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of SCI_EVT here if no one else is
			 * already taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}
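
/*
 * The GPE handler simply advances the current transaction (or submits a
 * query when SCI_EVT is seen) under ec->lock; the heavier _Qxx evaluation
 * is deferred to the workqueue item above.
 */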
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	unsigned long flags;
	struct acpi_ec *ec = data;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return ACPI_INTERRUPT_HANDLED;
}

/* --------------------------------------------------------------------------
 * Address Space Management
 * -------------------------------------------------------------------------- */
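
/*
 * acpi_ec_space_handler() backs the ACPI EmbeddedControl operation region:
 * AML accesses are translated into a sequence of single-byte EC read/write
 * transactions, with burst mode enabled around multi-byte or busy-polled
 * accesses.
 */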
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec_busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec_busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

/* --------------------------------------------------------------------------
 * Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);
static struct acpi_ec *make_acpi_ec(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;
	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return status;
	ec->gpe = tmp;
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}
static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return 0;
	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_ec_start(ec, false);
	status = acpi_install_address_space_handler(ec->handle,
						    ACPI_ADR_SPACE_EC,
						    &acpi_ec_space_handler,
						    NULL, ec);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {
			/*
			 * Maybe the OS failed to evaluate the _REG object.
			 * The AE_NOT_FOUND error is ignored and the OS
			 * continues to initialize the EC.
			 */
			pr_err("Failed to evaluate the _REG object"
			       " of the EC device. A broken BIOS is suspected.\n");
		} else {
			acpi_ec_stop(ec, false);
			acpi_remove_gpe_handler(NULL, ec->gpe,
						&acpi_ec_gpe_handler);
			return -ENODEV;
		}
	}

	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
	return 0;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return;
	acpi_ec_stop(ec, false);
	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
		pr_err("failed to remove space handler\n");
	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				&acpi_ec_gpe_handler)))
		pr_err("failed to remove gpe handler\n");
	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		ec = boot_ec;
		boot_ec = NULL;
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;
	struct acpi_ec_query_handler *handler, *tmp;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	ec_remove_handlers(ec);
	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		list_del(&handler->node);
		kfree(handler);
	}
	mutex_unlock(&ec->mutex);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec == first_ec)
		first_ec = NULL;
	kfree(ec);
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

int __init acpi_boot_ec_enable(void)
{
	if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
		return 0;
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
	return -EFAULT;
}
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};

/* Some BIOS do not survive early DSDT scan, skip it */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}

/* ASUStek often supplies us with broken ECDT, validate it */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}
#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set. In that case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif
/*
 * On some hardware it is necessary to clear events accumulated by the EC
 * during sleep. If too many events are accumulated, these ECs stop
 * reporting GPEs until they are manually polled (e.g. Samsung Series 5/9
 * notebooks).
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}
static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_validate_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
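
/*
 * acpi_ec_ecdt_probe() sets up the boot EC before the namespace is fully
 * usable: it prefers the ECDT-provided addresses and GPE, falls back to an
 * early DSDT scan on machines without (or with blacklisted) ECDTs, and
 * applies the DMI quirks collected in ec_dmi_table above.
 */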
int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table is found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		boot_ec->handle = ACPI_ROOT_OBJECT;
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust ECDT, which comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			return -ENOMEM;
		/* fall through */
	}

	if (EC_FLAGS_SKIP_DSDT_SCAN) {
		kfree(saved_ec);
		return -ENODEV;
	}

	/* This workaround is needed only on some broken machines,
	 * which require early EC, but fail to provide ECDT */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check that acpi_get_devices actually finds something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
				"ECDT tables, which are very hard to work around. "
				"Trying to use DSDT EC info instead. Please send "
				"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/*
		 * We really need to limit this workaround: the only ASUS
		 * machines that need it have a fake EC._INI method, so use
		 * that as a flag. Keep the boot_ec struct as it will be
		 * needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	kfree(saved_ec);
	boot_ec = NULL;
	return -ENODEV;
}
static int param_set_event_clearing(const char *val, struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}

static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	return 0;
}

module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};

int __init acpi_ec_init(void)
{
	int result = 0;

	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result < 0)
		return -ENODEV;

	return result;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
}
#endif	/* 0 */