/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 *   Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *           2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *           2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *           2004       Luming Yu <luming.yu@intel.com>
 *           2001, 2002 Andy Grover <andrew.grover@intel.com>
 *           2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS                   "embedded_controller"
#define ACPI_EC_DEVICE_NAME             "Embedded Controller"
#define ACPI_EC_FILE_INFO               "info"

/* EC status register */
#define ACPI_EC_FLAG_OBF        0x01    /* Output buffer full */
#define ACPI_EC_FLAG_IBF        0x02    /* Input buffer full */
#define ACPI_EC_FLAG_CMD        0x08    /* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST      0x10    /* burst mode */
#define ACPI_EC_FLAG_SCI        0x20    /* EC-SCI occurred */
/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         kind of EC firmware has implemented an event queue and will
 *         return 0x00 to indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS       0x00
#define ACPI_EC_EVT_TIMING_QUERY        0x01
#define ACPI_EC_EVT_TIMING_EVENT        0x02
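/*
 * Illustrative example (not normative, added for clarity): with the default
 * "QUERY" timing, a single event is typically processed as follows. The
 * event number 0x33 is hypothetical.
 *
 *   EC sets SCI_EVT in EC_SC and raises the GPE
 *   host writes QR_EC (0x84) to EC_CMD
 *   EC places the _Qxx number (e.g. 0x33) in EC_DATA and clears SCI_EVT
 *   host reads EC_DATA, then re-checks SCI_EVT for further events
 *   host evaluates the matching _Q33 method
 *
 * With the "EVENT" timing the EC only clears SCI_EVT after it notices the
 * EC_DATA read, which is why an extra guard delay is inserted before
 * SCI_EVT is re-checked (see ec_guard() and EC_FLAGS_QUERY_GUARDING below).
 */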
/* EC commands */
enum ec_command {
        ACPI_EC_COMMAND_READ = 0x80,
        ACPI_EC_COMMAND_WRITE = 0x81,
        ACPI_EC_BURST_ENABLE = 0x82,
        ACPI_EC_BURST_DISABLE = 0x83,
        ACPI_EC_COMMAND_QUERY = 0x84,
};
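/*
 * Byte-level sketch of how these commands travel over the two EC ports
 * (a simplified illustration of what advance_transaction() implements,
 * not additional driver logic):
 *
 *   ACPI_EC_COMMAND_READ (RD_EC, 0x80):
 *     host waits for IBF=0, writes 0x80 to EC_CMD
 *     host waits for IBF=0, writes the EC address to EC_DATA
 *     host waits for OBF=1, reads the value from EC_DATA
 *
 *   ACPI_EC_COMMAND_WRITE (WR_EC, 0x81):
 *     host waits for IBF=0, writes 0x81 to EC_CMD
 *     host waits for IBF=0, writes the EC address to EC_DATA
 *     host waits for IBF=0, writes the value to EC_DATA
 */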
#define ACPI_EC_DELAY           500     /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK      1000    /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL     550     /* Default guard time (us) between EC
                                         * accesses when polling */
#define ACPI_EC_CLEAR_MAX       100     /* Maximum number of events to query
                                         * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES     16      /* Maximum number of parallel queries */
enum {
        EC_FLAGS_QUERY_ENABLED,         /* Query is enabled */
        EC_FLAGS_QUERY_PENDING,         /* Query is pending */
        EC_FLAGS_QUERY_GUARDING,        /* Guard for SCI_EVT check */
        EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
        EC_FLAGS_EC_HANDLER_INSTALLED,  /* OpReg handler installed */
        EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
        EC_FLAGS_STARTED,               /* Driver is started */
        EC_FLAGS_STOPPED,               /* Driver is stopped */
        EC_FLAGS_COMMAND_STORM,         /* GPE storm detected during the
                                         * current command processing */
};

#define ACPI_EC_COMMAND_POLL            0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE        0x02 /* Completed last byte */
  111. /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
  112. static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
  113. module_param(ec_delay, uint, 0644);
  114. MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
  115. static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
  116. module_param(ec_max_queries, uint, 0644);
  117. MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
  118. static bool ec_busy_polling __read_mostly;
  119. module_param(ec_busy_polling, bool, 0644);
  120. MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
  121. static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
  122. module_param(ec_polling_guard, uint, 0644);
  123. MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
  124. static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
/*
 * If the number of false interrupts per transaction exceeds this
 * threshold, a GPE storm is assumed to have happened and the GPE is
 * disabled for normal transactions.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");
static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");
static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
struct acpi_ec_query_handler {
        struct list_head node;
        acpi_ec_query_func func;
        acpi_handle handle;
        void *data;
        u8 query_bit;
        struct kref kref;
};
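/*
 * Descriptive note on the transaction bookkeeping below (derived from how
 * the fields are used in start_transaction() and advance_transaction()):
 * @wdata/@wlen/@wi: bytes to be written (command payload) and the current
 *                   write index.
 * @rdata/@rlen/@ri: buffer for bytes to be read back and the current read
 *                   index.
 * @irq_count:       number of GPEs taken while this transaction was in
 *                   flight, compared against ec_storm_threshold.
 * @flags:           ACPI_EC_COMMAND_POLL / ACPI_EC_COMMAND_COMPLETE state
 *                   bits of the transaction state machine.
 */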
struct transaction {
        const u8 *wdata;
        u8 *rdata;
        unsigned short irq_count;
        u8 command;
        u8 wi;
        u8 ri;
        u8 wlen;
        u8 rlen;
        u8 flags;
};

struct acpi_ec_query {
        struct transaction transaction;
        struct work_struct work;
        struct acpi_ec_query_handler *handler;
};
  163. static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
  164. static void advance_transaction(struct acpi_ec *ec);
  165. static void acpi_ec_event_handler(struct work_struct *work);
  166. static void acpi_ec_event_processor(struct work_struct *work);
  167. struct acpi_ec *boot_ec, *first_ec;
  168. EXPORT_SYMBOL(first_ec);
  169. static bool boot_ec_is_ecdt = false;
  170. static struct workqueue_struct *ec_query_wq;
  171. static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
  172. static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
  173. static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
  174. /* --------------------------------------------------------------------------
  175. * Logging/Debugging
  176. * -------------------------------------------------------------------------- */
/*
 * Separators used by developers to mark the boundaries of the EC
 * handling processes in the log output.
 */
  181. #ifdef DEBUG
  182. #define EC_DBG_SEP " "
  183. #define EC_DBG_DRV "+++++"
  184. #define EC_DBG_STM "====="
  185. #define EC_DBG_REQ "*****"
  186. #define EC_DBG_EVT "#####"
  187. #else
  188. #define EC_DBG_SEP ""
  189. #define EC_DBG_DRV
  190. #define EC_DBG_STM
  191. #define EC_DBG_REQ
  192. #define EC_DBG_EVT
  193. #endif
  194. #define ec_log_raw(fmt, ...) \
  195. pr_info(fmt "\n", ##__VA_ARGS__)
  196. #define ec_dbg_raw(fmt, ...) \
  197. pr_debug(fmt "\n", ##__VA_ARGS__)
  198. #define ec_log(filter, fmt, ...) \
  199. ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
  200. #define ec_dbg(filter, fmt, ...) \
  201. ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
  202. #define ec_log_drv(fmt, ...) \
  203. ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
  204. #define ec_dbg_drv(fmt, ...) \
  205. ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
  206. #define ec_dbg_stm(fmt, ...) \
  207. ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
  208. #define ec_dbg_req(fmt, ...) \
  209. ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
  210. #define ec_dbg_evt(fmt, ...) \
  211. ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
  212. #define ec_dbg_ref(ec, fmt, ...) \
  213. ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
  214. /* --------------------------------------------------------------------------
  215. * Device Flags
  216. * -------------------------------------------------------------------------- */
  217. static bool acpi_ec_started(struct acpi_ec *ec)
  218. {
  219. return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
  220. !test_bit(EC_FLAGS_STOPPED, &ec->flags);
  221. }
static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{
        /*
         * There is an OSPM early-stage logic. During the early stages
         * (boot/resume), OSPMs shouldn't enable the event handling; only
         * the EC transactions are allowed to be performed.
         */
        if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
                return false;
        /*
         * However, disabling the event handling is experimental for the
         * late stage (suspend), and is controlled by the boot parameter
         * "ec_freeze_events":
         * 1. true:  The EC event handling is disabled before entering
         *           the noirq stage.
         * 2. false: The EC event handling is automatically disabled as
         *           soon as the EC driver is stopped.
         */
        if (ec_freeze_events)
                return acpi_ec_started(ec);
        else
                return test_bit(EC_FLAGS_STARTED, &ec->flags);
}
  245. static bool acpi_ec_flushed(struct acpi_ec *ec)
  246. {
  247. return ec->reference_count == 1;
  248. }
  249. /* --------------------------------------------------------------------------
  250. * EC Registers
  251. * -------------------------------------------------------------------------- */
  252. static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
  253. {
  254. u8 x = inb(ec->command_addr);
  255. ec_dbg_raw("EC_SC(R) = 0x%2.2x "
  256. "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
  257. x,
  258. !!(x & ACPI_EC_FLAG_SCI),
  259. !!(x & ACPI_EC_FLAG_BURST),
  260. !!(x & ACPI_EC_FLAG_CMD),
  261. !!(x & ACPI_EC_FLAG_IBF),
  262. !!(x & ACPI_EC_FLAG_OBF));
  263. return x;
  264. }
  265. static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
  266. {
  267. u8 x = inb(ec->data_addr);
  268. ec->timestamp = jiffies;
  269. ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
  270. return x;
  271. }
  272. static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
  273. {
  274. ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
  275. outb(command, ec->command_addr);
  276. ec->timestamp = jiffies;
  277. }
  278. static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
  279. {
  280. ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
  281. outb(data, ec->data_addr);
  282. ec->timestamp = jiffies;
  283. }
  284. #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
  285. static const char *acpi_ec_cmd_string(u8 cmd)
  286. {
  287. switch (cmd) {
  288. case 0x80:
  289. return "RD_EC";
  290. case 0x81:
  291. return "WR_EC";
  292. case 0x82:
  293. return "BE_EC";
  294. case 0x83:
  295. return "BD_EC";
  296. case 0x84:
  297. return "QR_EC";
  298. }
  299. return "UNKNOWN";
  300. }
  301. #else
  302. #define acpi_ec_cmd_string(cmd) "UNDEF"
  303. #endif
  304. /* --------------------------------------------------------------------------
  305. * GPE Registers
  306. * -------------------------------------------------------------------------- */
  307. static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
  308. {
  309. acpi_event_status gpe_status = 0;
  310. (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
  311. return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
  312. }
  313. static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
  314. {
  315. if (open)
  316. acpi_enable_gpe(NULL, ec->gpe);
  317. else {
  318. BUG_ON(ec->reference_count < 1);
  319. acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
  320. }
  321. if (acpi_ec_is_gpe_raised(ec)) {
                /*
                 * On some platforms, EN=1 writes cannot trigger GPE, so
                 * software needs to manually trigger a pseudo GPE event on
                 * EN=1 writes.
                 */
  327. ec_dbg_raw("Polling quirk");
  328. advance_transaction(ec);
  329. }
  330. }
  331. static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
  332. {
  333. if (close)
  334. acpi_disable_gpe(NULL, ec->gpe);
  335. else {
  336. BUG_ON(ec->reference_count < 1);
  337. acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
  338. }
  339. }
  340. static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
  341. {
        /*
         * GPE STS is a W1C register, which means:
         * 1. Software can clear it without worrying about clearing other
         *    GPEs' STS bits when the hardware sets them in parallel.
         * 2. As long as software only clears it when it is set, hardware
         *    won't set it in parallel.
         * So software can clear the GPE in any context.
         * Warning: do not move the check into advance_transaction() as the
         * EC commands would then be sent without the GPE being raised.
         */
  352. if (!acpi_ec_is_gpe_raised(ec))
  353. return;
  354. acpi_clear_gpe(NULL, ec->gpe);
  355. }
  356. /* --------------------------------------------------------------------------
  357. * Transaction Management
  358. * -------------------------------------------------------------------------- */
  359. static void acpi_ec_submit_request(struct acpi_ec *ec)
  360. {
  361. ec->reference_count++;
  362. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
  363. ec->reference_count == 1)
  364. acpi_ec_enable_gpe(ec, true);
  365. }
  366. static void acpi_ec_complete_request(struct acpi_ec *ec)
  367. {
  368. bool flushed = false;
  369. ec->reference_count--;
  370. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
  371. ec->reference_count == 0)
  372. acpi_ec_disable_gpe(ec, true);
  373. flushed = acpi_ec_flushed(ec);
  374. if (flushed)
  375. wake_up(&ec->wait);
  376. }
  377. static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
  378. {
  379. if (!test_bit(flag, &ec->flags)) {
  380. acpi_ec_disable_gpe(ec, false);
  381. ec_dbg_drv("Polling enabled");
  382. set_bit(flag, &ec->flags);
  383. }
  384. }
  385. static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
  386. {
  387. if (test_bit(flag, &ec->flags)) {
  388. clear_bit(flag, &ec->flags);
  389. acpi_ec_enable_gpe(ec, false);
  390. ec_dbg_drv("Polling disabled");
  391. }
  392. }
/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count. If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
  404. static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
  405. {
  406. if (!acpi_ec_started(ec))
  407. return false;
  408. acpi_ec_submit_request(ec);
  409. return true;
  410. }
  411. static void acpi_ec_submit_query(struct acpi_ec *ec)
  412. {
  413. acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
  414. if (!acpi_ec_event_enabled(ec))
  415. return;
  416. if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
  417. ec_dbg_evt("Command(%s) submitted/blocked",
  418. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  419. ec->nr_pending_queries++;
  420. schedule_work(&ec->work);
  421. }
  422. }
  423. static void acpi_ec_complete_query(struct acpi_ec *ec)
  424. {
  425. if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
  426. ec_dbg_evt("Command(%s) unblocked",
  427. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  428. acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
  429. }
  430. static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
  431. {
  432. if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
  433. ec_log_drv("event unblocked");
  434. if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
  435. advance_transaction(ec);
  436. }
  437. static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
  438. {
  439. if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
  440. ec_log_drv("event blocked");
  441. }
  442. static void acpi_ec_enable_event(struct acpi_ec *ec)
  443. {
  444. unsigned long flags;
  445. spin_lock_irqsave(&ec->lock, flags);
  446. if (acpi_ec_started(ec))
  447. __acpi_ec_enable_event(ec);
  448. spin_unlock_irqrestore(&ec->lock, flags);
  449. }
  450. #ifdef CONFIG_PM_SLEEP
  451. static bool acpi_ec_query_flushed(struct acpi_ec *ec)
  452. {
  453. bool flushed;
  454. unsigned long flags;
  455. spin_lock_irqsave(&ec->lock, flags);
  456. flushed = !ec->nr_pending_queries;
  457. spin_unlock_irqrestore(&ec->lock, flags);
  458. return flushed;
  459. }
  460. static void __acpi_ec_flush_event(struct acpi_ec *ec)
  461. {
        /*
         * When ec_freeze_events is true, we need to flush events at
         * the proper point before entering the noirq stage.
         */
  466. wait_event(ec->wait, acpi_ec_query_flushed(ec));
  467. if (ec_query_wq)
  468. flush_workqueue(ec_query_wq);
  469. }
  470. static void acpi_ec_disable_event(struct acpi_ec *ec)
  471. {
  472. unsigned long flags;
  473. spin_lock_irqsave(&ec->lock, flags);
  474. __acpi_ec_disable_event(ec);
  475. spin_unlock_irqrestore(&ec->lock, flags);
  476. __acpi_ec_flush_event(ec);
  477. }
  478. void acpi_ec_flush_work(void)
  479. {
  480. if (first_ec)
  481. __acpi_ec_flush_event(first_ec);
  482. flush_scheduled_work();
  483. }
  484. #endif /* CONFIG_PM_SLEEP */
  485. static bool acpi_ec_guard_event(struct acpi_ec *ec)
  486. {
  487. bool guarded = true;
  488. unsigned long flags;
  489. spin_lock_irqsave(&ec->lock, flags);
  490. /*
  491. * If firmware SCI_EVT clearing timing is "event", we actually
  492. * don't know when the SCI_EVT will be cleared by firmware after
  493. * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
  494. * acceptable period.
  495. *
  496. * The guarding period begins when EC_FLAGS_QUERY_PENDING is
  497. * flagged, which means SCI_EVT check has just been performed.
  498. * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
  499. * guarding should have already been performed (via
  500. * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
  501. * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
  502. * ACPI_EC_COMMAND_POLL state immediately.
  503. */
  504. if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
  505. ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
  506. !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
  507. (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
  508. guarded = false;
  509. spin_unlock_irqrestore(&ec->lock, flags);
  510. return guarded;
  511. }
  512. static int ec_transaction_polled(struct acpi_ec *ec)
  513. {
  514. unsigned long flags;
  515. int ret = 0;
  516. spin_lock_irqsave(&ec->lock, flags);
  517. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
  518. ret = 1;
  519. spin_unlock_irqrestore(&ec->lock, flags);
  520. return ret;
  521. }
  522. static int ec_transaction_completed(struct acpi_ec *ec)
  523. {
  524. unsigned long flags;
  525. int ret = 0;
  526. spin_lock_irqsave(&ec->lock, flags);
  527. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
  528. ret = 1;
  529. spin_unlock_irqrestore(&ec->lock, flags);
  530. return ret;
  531. }
  532. static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
  533. {
  534. ec->curr->flags |= flag;
  535. if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
  536. if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
  537. flag == ACPI_EC_COMMAND_POLL)
  538. acpi_ec_complete_query(ec);
  539. if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
  540. flag == ACPI_EC_COMMAND_COMPLETE)
  541. acpi_ec_complete_query(ec);
  542. if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
  543. flag == ACPI_EC_COMMAND_COMPLETE)
  544. set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
  545. }
  546. }
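/*
 * Summary of the transaction state machine driven by advance_transaction()
 * (a descriptive note, derived from the code below):
 *
 *   submitted --(command byte written with IBF=0)--> ACPI_EC_COMMAND_POLL
 *   ACPI_EC_COMMAND_POLL --(all wdata written / all rdata read)-->
 *                                                    ACPI_EC_COMMAND_COMPLETE
 *
 * advance_transaction() is called either from the GPE handler or from
 * ec_poll() when polling; ec_transaction_transition() additionally ties
 * these state changes to the SCI_EVT clearing timing selected by
 * ec_event_clearing.
 */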
  547. static void advance_transaction(struct acpi_ec *ec)
  548. {
  549. struct transaction *t;
  550. u8 status;
  551. bool wakeup = false;
  552. ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
  553. smp_processor_id());
        /*
         * By always clearing STS before handling any indications, we
         * ensure that a hardware STS 0->1 change after this clearing
         * can always trigger a GPE interrupt.
         */
  559. acpi_ec_clear_gpe(ec);
  560. status = acpi_ec_read_status(ec);
  561. t = ec->curr;
        /*
         * If another IRQ or a guarded polling mode advancement is
         * detected, the next QR_EC submission is allowed.
         */
  566. if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
  567. if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
  568. (!ec->nr_pending_queries ||
  569. test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
  570. clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
  571. acpi_ec_complete_query(ec);
  572. }
  573. }
  574. if (!t)
  575. goto err;
  576. if (t->flags & ACPI_EC_COMMAND_POLL) {
  577. if (t->wlen > t->wi) {
  578. if ((status & ACPI_EC_FLAG_IBF) == 0)
  579. acpi_ec_write_data(ec, t->wdata[t->wi++]);
  580. else
  581. goto err;
  582. } else if (t->rlen > t->ri) {
  583. if ((status & ACPI_EC_FLAG_OBF) == 1) {
  584. t->rdata[t->ri++] = acpi_ec_read_data(ec);
  585. if (t->rlen == t->ri) {
  586. ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
  587. if (t->command == ACPI_EC_COMMAND_QUERY)
  588. ec_dbg_evt("Command(%s) completed by hardware",
  589. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  590. wakeup = true;
  591. }
  592. } else
  593. goto err;
  594. } else if (t->wlen == t->wi &&
  595. (status & ACPI_EC_FLAG_IBF) == 0) {
  596. ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
  597. wakeup = true;
  598. }
  599. goto out;
  600. } else {
  601. if (EC_FLAGS_QUERY_HANDSHAKE &&
  602. !(status & ACPI_EC_FLAG_SCI) &&
  603. (t->command == ACPI_EC_COMMAND_QUERY)) {
  604. ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
  605. t->rdata[t->ri++] = 0x00;
  606. ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
  607. ec_dbg_evt("Command(%s) completed by software",
  608. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  609. wakeup = true;
  610. } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
  611. acpi_ec_write_cmd(ec, t->command);
  612. ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
  613. } else
  614. goto err;
  615. goto out;
  616. }
  617. err:
        /*
         * If the SCI bit is set, do not treat this as a false IRQ;
         * otherwise an unhandled IRQ would be counted as a false one.
         */
  622. if (!(status & ACPI_EC_FLAG_SCI)) {
  623. if (in_interrupt() && t) {
  624. if (t->irq_count < ec_storm_threshold)
  625. ++t->irq_count;
  626. /* Allow triggering on 0 threshold */
  627. if (t->irq_count == ec_storm_threshold)
  628. acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
  629. }
  630. }
  631. out:
  632. if (status & ACPI_EC_FLAG_SCI)
  633. acpi_ec_submit_query(ec);
  634. if (wakeup && in_interrupt())
  635. wake_up(&ec->wait);
  636. }
  637. static void start_transaction(struct acpi_ec *ec)
  638. {
  639. ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
  640. ec->curr->flags = 0;
  641. }
  642. static int ec_guard(struct acpi_ec *ec)
  643. {
  644. unsigned long guard = usecs_to_jiffies(ec->polling_guard);
  645. unsigned long timeout = ec->timestamp + guard;
  646. /* Ensure guarding period before polling EC status */
  647. do {
  648. if (ec->busy_polling) {
  649. /* Perform busy polling */
  650. if (ec_transaction_completed(ec))
  651. return 0;
  652. udelay(jiffies_to_usecs(guard));
  653. } else {
  654. /*
  655. * Perform wait polling
  656. * 1. Wait the transaction to be completed by the
  657. * GPE handler after the transaction enters
  658. * ACPI_EC_COMMAND_POLL state.
  659. * 2. A special guarding logic is also required
  660. * for event clearing mode "event" before the
  661. * transaction enters ACPI_EC_COMMAND_POLL
  662. * state.
  663. */
  664. if (!ec_transaction_polled(ec) &&
  665. !acpi_ec_guard_event(ec))
  666. break;
  667. if (wait_event_timeout(ec->wait,
  668. ec_transaction_completed(ec),
  669. guard))
  670. return 0;
  671. }
  672. } while (time_before(jiffies, timeout));
  673. return -ETIME;
  674. }
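/*
 * Descriptive note on the guard above: the loop waits in increments of the
 * configured guard time (ec->polling_guard, ACPI_EC_UDELAY_POLL == 550us by
 * default), and the overall window is measured from ec->timestamp, which is
 * refreshed on every EC register access. ec_poll() below keeps invoking
 * ec_guard()/advance_transaction() until its per-attempt budget of ec_delay
 * (500ms by default) expires, restarting the transaction between attempts
 * (up to five attempts in total) before returning -ETIME.
 */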
  675. static int ec_poll(struct acpi_ec *ec)
  676. {
  677. unsigned long flags;
  678. int repeat = 5; /* number of command restarts */
  679. while (repeat--) {
  680. unsigned long delay = jiffies +
  681. msecs_to_jiffies(ec_delay);
  682. do {
  683. if (!ec_guard(ec))
  684. return 0;
  685. spin_lock_irqsave(&ec->lock, flags);
  686. advance_transaction(ec);
  687. spin_unlock_irqrestore(&ec->lock, flags);
  688. } while (time_before(jiffies, delay));
  689. pr_debug("controller reset, restart transaction\n");
  690. spin_lock_irqsave(&ec->lock, flags);
  691. start_transaction(ec);
  692. spin_unlock_irqrestore(&ec->lock, flags);
  693. }
  694. return -ETIME;
  695. }
  696. static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
  697. struct transaction *t)
  698. {
  699. unsigned long tmp;
  700. int ret = 0;
  701. /* start transaction */
  702. spin_lock_irqsave(&ec->lock, tmp);
  703. /* Enable GPE for command processing (IBF=0/OBF=1) */
  704. if (!acpi_ec_submit_flushable_request(ec)) {
  705. ret = -EINVAL;
  706. goto unlock;
  707. }
  708. ec_dbg_ref(ec, "Increase command");
  709. /* following two actions should be kept atomic */
  710. ec->curr = t;
  711. ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
  712. start_transaction(ec);
  713. spin_unlock_irqrestore(&ec->lock, tmp);
  714. ret = ec_poll(ec);
  715. spin_lock_irqsave(&ec->lock, tmp);
  716. if (t->irq_count == ec_storm_threshold)
  717. acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
  718. ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
  719. ec->curr = NULL;
  720. /* Disable GPE for command processing (IBF=0/OBF=1) */
  721. acpi_ec_complete_request(ec);
  722. ec_dbg_ref(ec, "Decrease command");
  723. unlock:
  724. spin_unlock_irqrestore(&ec->lock, tmp);
  725. return ret;
  726. }
  727. static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
  728. {
  729. int status;
  730. u32 glk;
  731. if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
  732. return -EINVAL;
  733. if (t->rdata)
  734. memset(t->rdata, 0, t->rlen);
  735. mutex_lock(&ec->mutex);
  736. if (ec->global_lock) {
  737. status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
  738. if (ACPI_FAILURE(status)) {
  739. status = -ENODEV;
  740. goto unlock;
  741. }
  742. }
  743. status = acpi_ec_transaction_unlocked(ec, t);
  744. if (ec->global_lock)
  745. acpi_release_global_lock(glk);
  746. unlock:
  747. mutex_unlock(&ec->mutex);
  748. return status;
  749. }
  750. static int acpi_ec_burst_enable(struct acpi_ec *ec)
  751. {
  752. u8 d;
  753. struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
  754. .wdata = NULL, .rdata = &d,
  755. .wlen = 0, .rlen = 1};
  756. return acpi_ec_transaction(ec, &t);
  757. }
  758. static int acpi_ec_burst_disable(struct acpi_ec *ec)
  759. {
  760. struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
  761. .wdata = NULL, .rdata = NULL,
  762. .wlen = 0, .rlen = 0};
  763. return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
  764. acpi_ec_transaction(ec, &t) : 0;
  765. }
  766. static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
  767. {
  768. int result;
  769. u8 d;
  770. struct transaction t = {.command = ACPI_EC_COMMAND_READ,
  771. .wdata = &address, .rdata = &d,
  772. .wlen = 1, .rlen = 1};
  773. result = acpi_ec_transaction(ec, &t);
  774. *data = d;
  775. return result;
  776. }
  777. static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
  778. {
  779. u8 wdata[2] = { address, data };
  780. struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
  781. .wdata = wdata, .rdata = NULL,
  782. .wlen = 2, .rlen = 0};
  783. return acpi_ec_transaction(ec, &t);
  784. }
  785. int ec_read(u8 addr, u8 *val)
  786. {
  787. int err;
  788. u8 temp_data;
  789. if (!first_ec)
  790. return -ENODEV;
  791. err = acpi_ec_read(first_ec, addr, &temp_data);
  792. if (!err) {
  793. *val = temp_data;
  794. return 0;
  795. }
  796. return err;
  797. }
  798. EXPORT_SYMBOL(ec_read);
  799. int ec_write(u8 addr, u8 val)
  800. {
  801. int err;
  802. if (!first_ec)
  803. return -ENODEV;
  804. err = acpi_ec_write(first_ec, addr, val);
  805. return err;
  806. }
  807. EXPORT_SYMBOL(ec_write);
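/*
 * Example usage of the exported helpers above (illustrative only; the
 * offset 0x10 and the calling driver are hypothetical, real offsets are
 * defined by the platform's EC firmware and AML):
 *
 *	u8 val;
 *
 *	if (!ec_read(0x10, &val))
 *		pr_info("EC register 0x10 = 0x%02x\n", val);
 *	if (ec_write(0x10, val | 0x01))
 *		pr_warn("EC write failed\n");
 *
 * Both helpers operate on first_ec and return 0 on success or a negative
 * errno (-ENODEV when no EC has been enumerated yet).
 */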
  808. int ec_transaction(u8 command,
  809. const u8 *wdata, unsigned wdata_len,
  810. u8 *rdata, unsigned rdata_len)
  811. {
  812. struct transaction t = {.command = command,
  813. .wdata = wdata, .rdata = rdata,
  814. .wlen = wdata_len, .rlen = rdata_len};
  815. if (!first_ec)
  816. return -ENODEV;
  817. return acpi_ec_transaction(first_ec, &t);
  818. }
  819. EXPORT_SYMBOL(ec_transaction);
  820. /* Get the handle to the EC device */
  821. acpi_handle ec_get_handle(void)
  822. {
  823. if (!first_ec)
  824. return NULL;
  825. return first_ec->handle;
  826. }
  827. EXPORT_SYMBOL(ec_get_handle);
  828. static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
  829. {
  830. unsigned long flags;
  831. spin_lock_irqsave(&ec->lock, flags);
  832. if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
  833. ec_dbg_drv("Starting EC");
  834. /* Enable GPE for event processing (SCI_EVT=1) */
  835. if (!resuming) {
  836. acpi_ec_submit_request(ec);
  837. ec_dbg_ref(ec, "Increase driver");
  838. }
  839. ec_log_drv("EC started");
  840. }
  841. spin_unlock_irqrestore(&ec->lock, flags);
  842. }
  843. static bool acpi_ec_stopped(struct acpi_ec *ec)
  844. {
  845. unsigned long flags;
  846. bool flushed;
  847. spin_lock_irqsave(&ec->lock, flags);
  848. flushed = acpi_ec_flushed(ec);
  849. spin_unlock_irqrestore(&ec->lock, flags);
  850. return flushed;
  851. }
  852. static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
  853. {
  854. unsigned long flags;
  855. spin_lock_irqsave(&ec->lock, flags);
  856. if (acpi_ec_started(ec)) {
  857. ec_dbg_drv("Stopping EC");
  858. set_bit(EC_FLAGS_STOPPED, &ec->flags);
  859. spin_unlock_irqrestore(&ec->lock, flags);
  860. wait_event(ec->wait, acpi_ec_stopped(ec));
  861. spin_lock_irqsave(&ec->lock, flags);
  862. /* Disable GPE for event processing (SCI_EVT=1) */
  863. if (!suspending) {
  864. acpi_ec_complete_request(ec);
  865. ec_dbg_ref(ec, "Decrease driver");
  866. } else if (!ec_freeze_events)
  867. __acpi_ec_disable_event(ec);
  868. clear_bit(EC_FLAGS_STARTED, &ec->flags);
  869. clear_bit(EC_FLAGS_STOPPED, &ec->flags);
  870. ec_log_drv("EC stopped");
  871. }
  872. spin_unlock_irqrestore(&ec->lock, flags);
  873. }
  874. static void acpi_ec_enter_noirq(struct acpi_ec *ec)
  875. {
  876. unsigned long flags;
  877. spin_lock_irqsave(&ec->lock, flags);
  878. ec->busy_polling = true;
  879. ec->polling_guard = 0;
  880. ec_log_drv("interrupt blocked");
  881. spin_unlock_irqrestore(&ec->lock, flags);
  882. }
  883. static void acpi_ec_leave_noirq(struct acpi_ec *ec)
  884. {
  885. unsigned long flags;
  886. spin_lock_irqsave(&ec->lock, flags);
  887. ec->busy_polling = ec_busy_polling;
  888. ec->polling_guard = ec_polling_guard;
  889. ec_log_drv("interrupt unblocked");
  890. spin_unlock_irqrestore(&ec->lock, flags);
  891. }
  892. void acpi_ec_block_transactions(void)
  893. {
  894. struct acpi_ec *ec = first_ec;
  895. if (!ec)
  896. return;
  897. mutex_lock(&ec->mutex);
  898. /* Prevent transactions from being carried out */
  899. acpi_ec_stop(ec, true);
  900. mutex_unlock(&ec->mutex);
  901. }
  902. void acpi_ec_unblock_transactions(void)
  903. {
  904. /*
  905. * Allow transactions to happen again (this function is called from
  906. * atomic context during wakeup, so we don't need to acquire the mutex).
  907. */
  908. if (first_ec)
  909. acpi_ec_start(first_ec, true);
  910. }
  911. /* --------------------------------------------------------------------------
  912. Event Management
  913. -------------------------------------------------------------------------- */
  914. static struct acpi_ec_query_handler *
  915. acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
  916. {
  917. if (handler)
  918. kref_get(&handler->kref);
  919. return handler;
  920. }
  921. static struct acpi_ec_query_handler *
  922. acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
  923. {
  924. struct acpi_ec_query_handler *handler;
  925. bool found = false;
  926. mutex_lock(&ec->mutex);
  927. list_for_each_entry(handler, &ec->list, node) {
  928. if (value == handler->query_bit) {
  929. found = true;
  930. break;
  931. }
  932. }
  933. mutex_unlock(&ec->mutex);
  934. return found ? acpi_ec_get_query_handler(handler) : NULL;
  935. }
  936. static void acpi_ec_query_handler_release(struct kref *kref)
  937. {
  938. struct acpi_ec_query_handler *handler =
  939. container_of(kref, struct acpi_ec_query_handler, kref);
  940. kfree(handler);
  941. }
  942. static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
  943. {
  944. kref_put(&handler->kref, acpi_ec_query_handler_release);
  945. }
  946. int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
  947. acpi_handle handle, acpi_ec_query_func func,
  948. void *data)
  949. {
  950. struct acpi_ec_query_handler *handler =
  951. kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
  952. if (!handler)
  953. return -ENOMEM;
  954. handler->query_bit = query_bit;
  955. handler->handle = handle;
  956. handler->func = func;
  957. handler->data = data;
  958. mutex_lock(&ec->mutex);
  959. kref_init(&handler->kref);
  960. list_add(&handler->node, &ec->list);
  961. mutex_unlock(&ec->mutex);
  962. return 0;
  963. }
  964. EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
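/*
 * Example registration of a query handler (illustrative; the query value
 * 0x42 and the callback name are hypothetical):
 *
 *	static int my_q42_handler(void *data)
 *	{
 *		return 0;	// invoked from the EC query workqueue when _Q42 fires
 *	}
 *
 *	acpi_ec_add_query_handler(first_ec, 0x42, NULL, my_q42_handler, NULL);
 *
 * acpi_ec_event_processor() prefers the func callback when both func and an
 * ACPI handle are supplied; passing a handle instead makes the driver
 * evaluate that ACPI object when the query fires.
 */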
  965. static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
  966. bool remove_all, u8 query_bit)
  967. {
  968. struct acpi_ec_query_handler *handler, *tmp;
  969. LIST_HEAD(free_list);
  970. mutex_lock(&ec->mutex);
  971. list_for_each_entry_safe(handler, tmp, &ec->list, node) {
  972. if (remove_all || query_bit == handler->query_bit) {
  973. list_del_init(&handler->node);
  974. list_add(&handler->node, &free_list);
  975. }
  976. }
  977. mutex_unlock(&ec->mutex);
  978. list_for_each_entry_safe(handler, tmp, &free_list, node)
  979. acpi_ec_put_query_handler(handler);
  980. }
  981. void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
  982. {
  983. acpi_ec_remove_query_handlers(ec, false, query_bit);
  984. }
  985. EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
  986. static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
  987. {
  988. struct acpi_ec_query *q;
  989. struct transaction *t;
  990. q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
  991. if (!q)
  992. return NULL;
  993. INIT_WORK(&q->work, acpi_ec_event_processor);
  994. t = &q->transaction;
  995. t->command = ACPI_EC_COMMAND_QUERY;
  996. t->rdata = pval;
  997. t->rlen = 1;
  998. return q;
  999. }
  1000. static void acpi_ec_delete_query(struct acpi_ec_query *q)
  1001. {
  1002. if (q) {
  1003. if (q->handler)
  1004. acpi_ec_put_query_handler(q->handler);
  1005. kfree(q);
  1006. }
  1007. }
  1008. static void acpi_ec_event_processor(struct work_struct *work)
  1009. {
  1010. struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
  1011. struct acpi_ec_query_handler *handler = q->handler;
  1012. ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
  1013. if (handler->func)
  1014. handler->func(handler->data);
  1015. else if (handler->handle)
  1016. acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
  1017. ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
  1018. acpi_ec_delete_query(q);
  1019. }
  1020. static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
  1021. {
  1022. u8 value = 0;
  1023. int result;
  1024. struct acpi_ec_query *q;
  1025. q = acpi_ec_create_query(&value);
  1026. if (!q)
  1027. return -ENOMEM;
  1028. /*
  1029. * Query the EC to find out which _Qxx method we need to evaluate.
  1030. * Note that successful completion of the query causes the ACPI_EC_SCI
  1031. * bit to be cleared (and thus clearing the interrupt source).
  1032. */
  1033. result = acpi_ec_transaction(ec, &q->transaction);
  1034. if (!value)
  1035. result = -ENODATA;
  1036. if (result)
  1037. goto err_exit;
  1038. q->handler = acpi_ec_get_query_handler_by_value(ec, value);
  1039. if (!q->handler) {
  1040. result = -ENODATA;
  1041. goto err_exit;
  1042. }
        /*
         * It is reported that _Qxx are evaluated in parallel on
         * Windows:
         * https://bugzilla.kernel.org/show_bug.cgi?id=94411
         *
         * Put this log entry before queue_work() in order to make
         * it appear before any other log entries produced during the
         * work queue execution.
         */
  1052. ec_dbg_evt("Query(0x%02x) scheduled", value);
  1053. if (!queue_work(ec_query_wq, &q->work)) {
  1054. ec_dbg_evt("Query(0x%02x) overlapped", value);
  1055. result = -EBUSY;
  1056. }
  1057. err_exit:
  1058. if (result)
  1059. acpi_ec_delete_query(q);
  1060. if (data)
  1061. *data = value;
  1062. return result;
  1063. }
  1064. static void acpi_ec_check_event(struct acpi_ec *ec)
  1065. {
  1066. unsigned long flags;
  1067. if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
  1068. if (ec_guard(ec)) {
  1069. spin_lock_irqsave(&ec->lock, flags);
                        /*
                         * Take care of SCI_EVT if no one else is
                         * taking care of it.
                         */
  1074. if (!ec->curr)
  1075. advance_transaction(ec);
  1076. spin_unlock_irqrestore(&ec->lock, flags);
  1077. }
  1078. }
  1079. }
  1080. static void acpi_ec_event_handler(struct work_struct *work)
  1081. {
  1082. unsigned long flags;
  1083. struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
  1084. ec_dbg_evt("Event started");
  1085. spin_lock_irqsave(&ec->lock, flags);
  1086. while (ec->nr_pending_queries) {
  1087. spin_unlock_irqrestore(&ec->lock, flags);
  1088. (void)acpi_ec_query(ec, NULL);
  1089. spin_lock_irqsave(&ec->lock, flags);
  1090. ec->nr_pending_queries--;
  1091. /*
  1092. * Before exit, make sure that this work item can be
  1093. * scheduled again. There might be QR_EC failures, leaving
  1094. * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
  1095. * item from being scheduled again.
  1096. */
  1097. if (!ec->nr_pending_queries) {
  1098. if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
  1099. ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
  1100. acpi_ec_complete_query(ec);
  1101. }
  1102. }
  1103. spin_unlock_irqrestore(&ec->lock, flags);
  1104. ec_dbg_evt("Event stopped");
  1105. acpi_ec_check_event(ec);
  1106. }
  1107. static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
  1108. u32 gpe_number, void *data)
  1109. {
  1110. unsigned long flags;
  1111. struct acpi_ec *ec = data;
  1112. spin_lock_irqsave(&ec->lock, flags);
  1113. advance_transaction(ec);
  1114. spin_unlock_irqrestore(&ec->lock, flags);
  1115. return ACPI_INTERRUPT_HANDLED;
  1116. }
  1117. /* --------------------------------------------------------------------------
  1118. * Address Space Management
  1119. * -------------------------------------------------------------------------- */
  1120. static acpi_status
  1121. acpi_ec_space_handler(u32 function, acpi_physical_address address,
  1122. u32 bits, u64 *value64,
  1123. void *handler_context, void *region_context)
  1124. {
  1125. struct acpi_ec *ec = handler_context;
  1126. int result = 0, i, bytes = bits / 8;
  1127. u8 *value = (u8 *)value64;
  1128. if ((address > 0xFF) || !value || !handler_context)
  1129. return AE_BAD_PARAMETER;
  1130. if (function != ACPI_READ && function != ACPI_WRITE)
  1131. return AE_BAD_PARAMETER;
  1132. if (ec->busy_polling || bits > 8)
  1133. acpi_ec_burst_enable(ec);
  1134. for (i = 0; i < bytes; ++i, ++address, ++value)
  1135. result = (function == ACPI_READ) ?
  1136. acpi_ec_read(ec, address, value) :
  1137. acpi_ec_write(ec, address, *value);
  1138. if (ec->busy_polling || bits > 8)
  1139. acpi_ec_burst_disable(ec);
  1140. switch (result) {
  1141. case -EINVAL:
  1142. return AE_BAD_PARAMETER;
  1143. case -ENODEV:
  1144. return AE_NOT_FOUND;
  1145. case -ETIME:
  1146. return AE_TIME;
  1147. default:
  1148. return AE_OK;
  1149. }
  1150. }
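/*
 * The handler above backs the EmbeddedControl operation region declared by
 * the platform's AML. A typical declaration looks like the following
 * (illustrative only, not taken from any specific machine; field names and
 * offsets are hypothetical):
 *
 *	OperationRegion (ECOR, EmbeddedControl, 0x00, 0xFF)
 *	Field (ECOR, ByteAcc, Lock, Preserve)
 *	{
 *		BTST, 8,	// battery status byte at offset 0x00
 *		BTMP, 16	// temperature word, handled as two byte accesses
 *	}
 *
 * Multi-byte field accesses arrive here with bits > 8 and are split into
 * per-byte acpi_ec_read()/acpi_ec_write() transactions by the loop above.
 */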
  1151. /* --------------------------------------------------------------------------
  1152. * Driver Interface
  1153. * -------------------------------------------------------------------------- */
  1154. static acpi_status
  1155. ec_parse_io_ports(struct acpi_resource *resource, void *context);
  1156. static void acpi_ec_free(struct acpi_ec *ec)
  1157. {
  1158. if (first_ec == ec)
  1159. first_ec = NULL;
  1160. if (boot_ec == ec)
  1161. boot_ec = NULL;
  1162. kfree(ec);
  1163. }
  1164. static struct acpi_ec *acpi_ec_alloc(void)
  1165. {
  1166. struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
  1167. if (!ec)
  1168. return NULL;
  1169. mutex_init(&ec->mutex);
  1170. init_waitqueue_head(&ec->wait);
  1171. INIT_LIST_HEAD(&ec->list);
  1172. spin_lock_init(&ec->lock);
  1173. INIT_WORK(&ec->work, acpi_ec_event_handler);
  1174. ec->timestamp = jiffies;
  1175. ec->busy_polling = true;
  1176. ec->polling_guard = 0;
  1177. return ec;
  1178. }
  1179. static acpi_status
  1180. acpi_ec_register_query_methods(acpi_handle handle, u32 level,
  1181. void *context, void **return_value)
  1182. {
  1183. char node_name[5];
  1184. struct acpi_buffer buffer = { sizeof(node_name), node_name };
  1185. struct acpi_ec *ec = context;
  1186. int value = 0;
  1187. acpi_status status;
  1188. status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
  1189. if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
  1190. acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
  1191. return AE_OK;
  1192. }
  1193. static acpi_status
  1194. ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
  1195. {
  1196. acpi_status status;
  1197. unsigned long long tmp = 0;
  1198. struct acpi_ec *ec = context;
        /* Clear the addr values; ec_parse_io_ports() depends on them. */
  1200. ec->command_addr = ec->data_addr = 0;
  1201. status = acpi_walk_resources(handle, METHOD_NAME__CRS,
  1202. ec_parse_io_ports, ec);
  1203. if (ACPI_FAILURE(status))
  1204. return status;
  1205. if (ec->data_addr == 0 || ec->command_addr == 0)
  1206. return AE_OK;
  1207. if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
  1208. /*
  1209. * Always inherit the GPE number setting from the ECDT
  1210. * EC.
  1211. */
  1212. ec->gpe = boot_ec->gpe;
  1213. } else {
  1214. /* Get GPE bit assignment (EC events). */
  1215. /* TODO: Add support for _GPE returning a package */
  1216. status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
  1217. if (ACPI_FAILURE(status))
  1218. return status;
  1219. ec->gpe = tmp;
  1220. }
  1221. /* Use the global lock for all EC transactions? */
  1222. tmp = 0;
  1223. acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
  1224. ec->global_lock = tmp;
  1225. ec->handle = handle;
  1226. return AE_CTRL_TERMINATE;
  1227. }
  1228. /*
  1229. * Note: This function returns an error code only when the address space
  1230. * handler is not installed, which means "not able to handle
  1231. * transactions".
  1232. */
  1233. static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
  1234. {
  1235. acpi_status status;
  1236. acpi_ec_start(ec, false);
  1237. if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
  1238. acpi_ec_enter_noirq(ec);
  1239. status = acpi_install_address_space_handler(ec->handle,
  1240. ACPI_ADR_SPACE_EC,
  1241. &acpi_ec_space_handler,
  1242. NULL, ec);
  1243. if (ACPI_FAILURE(status)) {
  1244. if (status == AE_NOT_FOUND) {
                                /*
                                 * Maybe the OS fails to evaluate the _REG
                                 * object. The AE_NOT_FOUND error will be
                                 * ignored and the OS will continue to
                                 * initialize the EC.
                                 */
                                pr_err("Failed to evaluate the _REG object"
                                       " of the EC device. Broken BIOS is suspected.\n");
  1253. } else {
  1254. acpi_ec_stop(ec, false);
  1255. return -ENODEV;
  1256. }
  1257. }
  1258. set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
  1259. }
  1260. if (!handle_events)
  1261. return 0;
  1262. if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
  1263. /* Find and register all query methods */
  1264. acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
  1265. acpi_ec_register_query_methods,
  1266. NULL, ec, NULL);
  1267. set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
  1268. }
  1269. if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
  1270. status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
  1271. ACPI_GPE_EDGE_TRIGGERED,
  1272. &acpi_ec_gpe_handler, ec);
  1273. /* This is not fatal as we can poll EC events */
  1274. if (ACPI_SUCCESS(status)) {
  1275. set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
  1276. acpi_ec_leave_noirq(ec);
  1277. if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
  1278. ec->reference_count >= 1)
  1279. acpi_ec_enable_gpe(ec, true);
  1280. /* EC is fully operational, allow queries */
  1281. acpi_ec_enable_event(ec);
  1282. }
  1283. }
  1284. return 0;
  1285. }
  1286. static void ec_remove_handlers(struct acpi_ec *ec)
  1287. {
  1288. if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
  1289. if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
  1290. ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
  1291. pr_err("failed to remove space handler\n");
  1292. clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
  1293. }
  1294. /*
  1295. * Stops handling the EC transactions after removing the operation
  1296. * region handler. This is required because _REG(DISCONNECT)
  1297. * invoked during the removal can result in new EC transactions.
  1298. *
  1299. * Flushes the EC requests and thus disables the GPE before
  1300. * removing the GPE handler. This is required by the current ACPICA
  1301. * GPE core. ACPICA GPE core will automatically disable a GPE when
  1302. * it is indicated but there is no way to handle it. So the drivers
  1303. * must disable the GPEs prior to removing the GPE handlers.
  1304. */
  1305. acpi_ec_stop(ec, false);
  1306. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
  1307. if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
  1308. &acpi_ec_gpe_handler)))
  1309. pr_err("failed to remove gpe handler\n");
  1310. clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
  1311. }
  1312. if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
  1313. acpi_ec_remove_query_handlers(ec, true, 0);
  1314. clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
  1315. }
  1316. }
static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
{
	int ret;

	ret = ec_install_handlers(ec, handle_events);
	if (ret)
		return ret;

	/* First EC capable of handling transactions */
	if (!first_ec) {
		first_ec = ec;
		acpi_handle_info(first_ec->handle, "Used as first EC\n");
	}

	acpi_handle_info(ec->handle,
			 "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
			 ec->gpe, ec->command_addr, ec->data_addr);
	return ret;
}
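
/*
 * (Re-)configure the boot EC: the EC used before and during the
 * enumeration of the namespace EC device. It is described either by
 * the ECDT (@is_ecdt) or by the DSDT. Updating its handle and
 * re-installing the handlers after the namespace initialization causes
 * _REG to be evaluated.
 */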
static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle,
			       bool handle_events, bool is_ecdt)
{
	int ret;

	/*
	 * Changing the ACPI handle results in a re-configuration of the
	 * boot EC. If that happens after the namespace initialization,
	 * it causes _REG evaluations.
	 */
	if (boot_ec && boot_ec->handle != handle)
		ec_remove_handlers(boot_ec);

	/* Unset old boot EC */
	if (boot_ec != ec)
		acpi_ec_free(boot_ec);

	/*
	 * ECDT device creation is split into acpi_ec_ecdt_probe() and
	 * acpi_ec_ecdt_start(). This function completes the ECDT parsing
	 * logic, as the handle update must be performed between the
	 * removal and the re-installation of the handlers.
	 */
	if (ec->handle != handle)
		ec->handle = handle;

	ret = acpi_ec_setup(ec, handle_events);
	if (ret)
		return ret;

	/* Set new boot EC */
	if (!boot_ec) {
		boot_ec = ec;
		boot_ec_is_ecdt = is_ecdt;
	}

	acpi_handle_info(boot_ec->handle,
			 "Used as boot %s EC to handle transactions%s\n",
			 is_ecdt ? "ECDT" : "DSDT",
			 handle_events ? " and events" : "");
	return ret;
}
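
/*
 * Look up the ACPI namespace handle of the EC described by the ECDT,
 * using the namepath stored in the table's "id" field.
 */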
static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
{
	struct acpi_table_ecdt *ecdt_ptr;
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return false;

	status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
	if (ACPI_FAILURE(status))
		return false;

	*phandle = handle;
	return true;
}
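
/* Check whether @ec describes the same hardware as the current boot EC. */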
static bool acpi_is_boot_ec(struct acpi_ec *ec)
{
	if (!boot_ec)
		return false;
	if (ec->handle == boot_ec->handle &&
	    ec->gpe == boot_ec->gpe &&
	    ec->command_addr == boot_ec->command_addr &&
	    ec->data_addr == boot_ec->data_addr)
		return true;
	return false;
}
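
/*
 * Driver .add() callback, run when the namespace EC device (PNP0C09) is
 * enumerated. If the device turns out to be the boot EC that is already
 * in use, the existing object is reused and switched to full event
 * handling instead of setting up a second EC.
 */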
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		ret = -EINVAL;
		goto err_alloc;
	}

	if (acpi_is_boot_ec(ec)) {
		boot_ec_is_ecdt = false;
		acpi_handle_debug(ec->handle, "duplicated.\n");
		acpi_ec_free(ec);
		ec = boot_ec;
		ret = acpi_config_boot_ec(ec, ec->handle, true, false);
	} else
		ret = acpi_ec_setup(ec, true);
	if (ret)
		goto err_query;

	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);
	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err_query:
	if (ec != boot_ec)
		acpi_ec_remove_query_handlers(ec, true, 0);
err_alloc:
	if (ec != boot_ec)
		acpi_ec_free(ec);
	return ret;
}
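
/*
 * Driver .remove() callback. Only non-boot EC objects are torn down and
 * freed here; the boot EC is left alone as it may still be needed for
 * transactions.
 */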
static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec != boot_ec) {
		ec_remove_handlers(ec);
		acpi_ec_free(ec);
	}
	return 0;
}
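
/*
 * Callback for acpi_walk_resources(): pick up the two I/O ports declared
 * in the EC's _CRS, data port first, command/status port second.
 */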
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};

/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
int __init acpi_ec_dsdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *ec;
	int ret;

	/*
	 * If a platform has an ECDT, there is no need to proceed: the
	 * following probe is not a part of the ACPI device enumeration,
	 * so executing _STA is not safe, and thus this probe may risk
	 * picking up an invalid EC device.
	 */
	if (boot_ec)
		return -ENODEV;

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;

	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id,
				  ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		ret = -ENODEV;
		goto error;
	}

	/*
	 * When the DSDT EC is available, always re-configure the boot EC
	 * to have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not
	 * handle the events.
	 */
	ret = acpi_config_boot_ec(ec, ec->handle, false, false);

error:
	if (ret)
		acpi_ec_free(ec);
	return ret;
}

/*
 * If the DSDT EC is not functioning, we still need to prepare a fully
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

	if (!boot_ec)
		return -ENODEV;
	/*
	 * The DSDT EC should have already been started in
	 * acpi_ec_add().
	 */
	if (!boot_ec_is_ecdt)
		return -ENODEV;

	/*
	 * At this point, the namespace and the GPE are initialized, so
	 * start to find the namespace objects and handle the events.
	 */
	if (!acpi_ec_ecdt_get_handle(&handle))
		return -ENODEV;

	return acpi_config_boot_ec(boot_ec, handle, true, true);
}

#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set. In that case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif

/*
 * Some ECDTs contain wrong register addresses.
 * MSI MS-171F
 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
 */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ECDT address correction.\n");
	EC_FLAGS_CORRECT_ECDT = 1;
	return 0;
}

/*
 * Some DSDTs contain a wrong GPE setting.
 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
 */
static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing to ignore the DSDT GPE setting.\n");
	EC_FLAGS_IGNORE_DSDT_GPE = 1;
	return 0;
}

static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
		ec_correct_ecdt, "MSI MS-171F", {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"), }, NULL
	},
	{
		ec_honor_ecdt_gpe, "ASUS FX502VD", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"), }, NULL
	},
	{
		ec_honor_ecdt_gpe, "ASUS FX502VE", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"), }, NULL
	},
	{
		ec_honor_ecdt_gpe, "ASUS GL702VMK", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"), }, NULL
	},
	{
		ec_honor_ecdt_gpe, "ASUS X550VXK", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"), }, NULL
	},
	{
		ec_honor_ecdt_gpe, "ASUS X580VD", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"), }, NULL
	},
	{},
};
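
/*
 * Early probe of the EC described by the ECDT. The namespace is not
 * available yet, so only the register addresses and the GPE number from
 * the table are used here; query methods and event handling are set up
 * later by acpi_ec_ecdt_start() or acpi_ec_add().
 */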
int __init acpi_ec_ecdt_probe(void)
{
	int ret;
	acpi_status status;
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;

	/* Generate a boot ec context */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto error;
	}

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		ret = -ENODEV;
		goto error;
	}

	if (EC_FLAGS_CORRECT_ECDT) {
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}
	ec->gpe = ecdt_ptr->gpe;

	/*
	 * At this point, the namespace is not initialized, so do not find
	 * the namespace objects or handle the events.
	 */
	ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true);

error:
	if (ret)
		acpi_ec_free(ec);
	return ret;
}
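
/*
 * System sleep handling. Depending on the ec_freeze_events and
 * ec_no_wakeup module parameters, EC events may be suppressed across
 * suspend and the EC GPE may be masked so that it cannot wake up the
 * system.
 */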
#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	if (acpi_sleep_no_ec_events() && ec_freeze_events)
		acpi_ec_disable_event(ec);
	return 0;
}

static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	return 0;
}

static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	return 0;
}

static int acpi_ec_resume(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}
#endif

static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
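
/*
 * The ec_event_clearing module parameter selects the point at which the
 * firmware is assumed to clear SCI_EVT: on EC_SC accesses ("status"),
 * on QR_EC writes ("query"), or on event data reads ("event").
 */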
static int param_set_event_clearing(const char *val, struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}

static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	return 0;
}

module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
	},
	.drv.pm = &acpi_ec_pm,
};
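
/*
 * The _Qxx query handlers run from a dedicated workqueue so that slow
 * query methods do not stall the system workqueues; ec_max_queries
 * bounds how many queries may be processed concurrently.
 */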
static inline int acpi_ec_query_init(void)
{
	if (!ec_query_wq) {
		ec_query_wq = alloc_workqueue("kec_query", 0,
					      ec_max_queries);
		if (!ec_query_wq)
			return -ENODEV;
	}
	return 0;
}

static inline void acpi_ec_query_exit(void)
{
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}
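
/*
 * Main driver initialization: start the ECDT EC probed earlier (if any)
 * and register the namespace EC driver. Initialization only fails when
 * both the ECDT start and the driver registration fail.
 */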
int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	/* register workqueue for _Qxx evaluations */
	result = acpi_ec_query_init();
	if (result)
		return result;

	/* Drivers must be started after acpi_ec_query_init() */
	ecdt_fail = acpi_ec_ecdt_start();
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif	/* 0 */