/*
 *  ec.c - ACPI Embedded Controller Driver (v3)
 *
 *  Copyright (C) 2001-2015 Intel Corporation
 *    Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *            2004       Luming Yu <luming.yu@intel.com>
 *            2001, 2002 Andy Grover <andrew.grover@intel.com>
 *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI : EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};

#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
#define ACPI_EC_UDELAY_POLL	1000	/* Wait 1ms for EC transaction polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */

enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
					 * OpReg are installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
					 * current command processing */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

/*
 * If the number of false interrupts per transaction exceeds this threshold,
 * a GPE storm is assumed to be happening and the GPE is disabled for the
 * rest of the transaction (polling is used instead).
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
	unsigned long timestamp;
};

static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);

struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUSTek ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */

/* --------------------------------------------------------------------------
 *                             Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 *                             EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	pr_debug("EC_SC(R) = 0x%2.2x "
		 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
		 x,
		 !!(x & ACPI_EC_FLAG_SCI),
		 !!(x & ACPI_EC_FLAG_BURST),
		 !!(x & ACPI_EC_FLAG_CMD),
		 !!(x & ACPI_EC_FLAG_IBF),
		 !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->curr->timestamp = jiffies;
	pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	pr_debug("EC_SC(W) = 0x%2.2x\n", command);
	outb(command, ec->command_addr);
	ec->curr->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
	outb(data, ec->data_addr);
	ec->curr->timestamp = jiffies;
}

#ifdef DEBUG
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 *                             GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		pr_debug("***** Polling quirk *****\n");
		advance_transaction(ec);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}

static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear GPE in any contexts.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}

/* --------------------------------------------------------------------------
 *                           Transaction Management
 * -------------------------------------------------------------------------- */
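/*
 * GPE reference counting: the started driver and every in-flight user hold
 * a reference.  The first reference enables the EC GPE and dropping the
 * last one disables it again; once the count falls back to the driver's own
 * single reference, waiters in acpi_ec_stop() are woken so that the flush
 * can complete.
 */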
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}

static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
{
	if (!test_bit(flag, &ec->flags)) {
		acpi_ec_disable_gpe(ec, false);
		pr_debug("+++++ Polling enabled +++++\n");
		set_bit(flag, &ec->flags);
	}
}

static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
{
	if (test_bit(flag, &ec->flags)) {
		clear_bit(flag, &ec->flags);
		acpi_ec_enable_gpe(ec, false);
		pr_debug("+++++ Polling disabled +++++\n");
	}
}

/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count.  If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}

static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		pr_debug("***** Event started *****\n");
		schedule_work(&ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
		pr_debug("***** Event stopped *****\n");
	}
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}
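/*
 * advance_transaction() is the single-step function of the transaction
 * state machine.  It is called with ec->lock held, both from the GPE
 * handler and from the ec_poll() timeout path: it clears the GPE status
 * bit, samples the EC status register, then either writes the next
 * command/data byte or reads the next result byte.  SCI_EVT is checked on
 * every step so that query events are scheduled as soon as they are seen,
 * and the waiter is woken when the transaction completes in IRQ context.
 */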
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	pr_debug("===== %s (%d) =====\n",
		 in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					t->flags |= ACPI_EC_COMMAND_COMPLETE;
					if (t->command == ACPI_EC_COMMAND_QUERY)
						pr_debug("***** Command(%s) hardware completion *****\n",
							 acpi_ec_cmd_string(t->command));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			t->flags |= ACPI_EC_COMMAND_COMPLETE;
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			t->flags |= ACPI_EC_COMMAND_POLL;
			acpi_ec_complete_query(ec);
			t->rdata[t->ri++] = 0x00;
			t->flags |= ACPI_EC_COMMAND_COMPLETE;
			pr_debug("***** Command(%s) software completion *****\n",
				 acpi_ec_cmd_string(t->command));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			t->flags |= ACPI_EC_COMMAND_POLL;
			acpi_ec_complete_query(ec);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, do not count this as a false IRQ;
	 * otherwise an IRQ that was simply not handled here would be
	 * taken for a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
	ec->curr->timestamp = jiffies;
	advance_transaction(ec);
}
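/*
 * ec_poll() waits for the current transaction to complete.  When interrupts
 * are usable it sleeps on ec->wait and lets the GPE handler drive the state
 * machine; for MSI ECs, or with interrupts disabled, it busy-waits with
 * udelay() instead.  If the EC makes no progress within ACPI_EC_UDELAY_POLL
 * microseconds the transaction is advanced manually, and after ec_delay
 * milliseconds it is restarted, up to 5 times, before giving up with -ETIME.
 */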
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		unsigned long usecs = ACPI_EC_UDELAY_POLL;
		do {
			/* don't sleep with disabled interrupts */
			if (EC_FLAGS_MSI || irqs_disabled()) {
				usecs = ACPI_EC_MSI_UDELAY;
				udelay(usecs);
				if (ec_transaction_completed(ec))
					return 0;
			} else {
				if (wait_event_timeout(ec->wait,
						ec_transaction_completed(ec),
						usecs_to_jiffies(usecs)))
					return 0;
			}
			spin_lock_irqsave(&ec->lock, flags);
			if (time_after(jiffies,
					ec->curr->timestamp +
					usecs_to_jiffies(usecs)))
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	if (EC_FLAGS_MSI)
		udelay(ACPI_EC_MSI_UDELAY);
	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	/* following two actions should be kept atomic */
	ec->curr = t;
	pr_debug("***** Command(%s) started *****\n",
		 acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);
	ret = ec_poll(ec);
	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	pr_debug("***** Command(%s) stopped *****\n",
		 acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);
	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}
	status = acpi_ec_transaction_unlocked(ec, t);
	if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
		msleep(1);
	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);

/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}

	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}
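/*
 * Driver state machine: acpi_ec_start() marks the driver as started and,
 * unless it is resuming, takes the driver's own GPE reference;
 * acpi_ec_stop() marks it as stopped, waits until all outstanding
 * transaction references have been dropped (the flush), and then releases
 * that reference again unless the system is suspending.
 */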
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		pr_debug("+++++ Starting EC +++++\n");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming)
			acpi_ec_submit_request(ec);
		pr_debug("EC started\n");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		pr_debug("+++++ Stopping EC +++++\n");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending)
			acpi_ec_complete_request(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		pr_debug("EC stopped\n");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	/* Allow transactions to be carried out again */
	acpi_ec_start(ec, true);

	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

void acpi_ec_unblock_transactions_early(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
	if (handler)
		kref_get(&handler->kref);
	return handler;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
	    kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry(handler, &free_list, node)
		acpi_ec_put_query_handler(handler);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static void acpi_ec_run(void *cxt)
{
	struct acpi_ec_query_handler *handler = cxt;

	if (!handler)
		return;
	pr_debug("##### Query(0x%02x) started #####\n", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
	acpi_ec_put_query_handler(handler);
}
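/*
 * acpi_ec_query() issues a QR_EC command to learn which _Qxx event is
 * pending, then looks up the matching query handler and schedules it for
 * deferred execution via acpi_os_execute(), taking a kref on the handler
 * so that it cannot be freed while the work is still pending.
 */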
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	acpi_status status;
	struct acpi_ec_query_handler *handler;
	struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
				.wdata = NULL, .rdata = &value,
				.wlen = 0, .rlen = 1};

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &t);
	if (result)
		return result;
	if (data)
		*data = value;
	if (!value)
		return -ENODATA;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			/* have custom handler for this bit */
			handler = acpi_ec_get_query_handler(handler);
			pr_debug("##### Query(0x%02x) scheduled #####\n",
				 handler->query_bit);
			status = acpi_os_execute((handler->func) ?
				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
				acpi_ec_run, handler);
			if (ACPI_FAILURE(status))
				result = -EBUSY;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	return result;
}

static void acpi_ec_gpe_poller(struct work_struct *work)
{
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	acpi_ec_query(ec, NULL);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	unsigned long flags;
	struct acpi_ec *ec = data;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return ACPI_INTERRUPT_HANDLED;
}

/* --------------------------------------------------------------------------
 *                           Address Space Management
 * -------------------------------------------------------------------------- */
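/*
 * acpi_ec_space_handler() backs the EmbeddedControl operation region: it
 * splits an AML access of "bits" width into single-byte EC read/write
 * transactions, wraps them in burst mode for MSI ECs or multi-byte
 * accesses, and translates the Linux error code of the last transaction
 * into an ACPI status value.
 */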
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (EC_FLAGS_MSI || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
		    acpi_ec_read(ec, address, value) :
		    acpi_ec_write(ec, address, *value);

	if (EC_FLAGS_MSI || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

/* --------------------------------------------------------------------------
 *                             Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static struct acpi_ec *make_acpi_ec(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_gpe_poller);
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return status;
	ec->gpe = tmp;
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return 0;
	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_ec_start(ec, false);
	status = acpi_install_address_space_handler(ec->handle,
						    ACPI_ADR_SPACE_EC,
						    &acpi_ec_space_handler,
						    NULL, ec);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {
			/*
			 * The OS may have failed to evaluate the _REG
			 * object.  The AE_NOT_FOUND error is ignored and
			 * the OS continues to initialize the EC.
			 */
			pr_err("Failed to evaluate the _REG object"
			       " of the EC device. A broken BIOS is suspected.\n");
		} else {
			acpi_ec_stop(ec, false);
			acpi_remove_gpe_handler(NULL, ec->gpe,
						&acpi_ec_gpe_handler);
			return -ENODEV;
		}
	}

	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
	return 0;
}

static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return;
	acpi_ec_stop(ec, false);
	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
		pr_err("failed to remove space handler\n");
	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				&acpi_ec_gpe_handler)))
		pr_err("failed to remove gpe handler\n");
	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		ec = boot_ec;
		boot_ec = NULL;
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;
	struct acpi_ec_query_handler *handler, *tmp;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	ec_remove_handlers(ec);
	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		list_del(&handler->node);
		kfree(handler);
	}
	mutex_unlock(&ec->mutex);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec == first_ec)
		first_ec = NULL;
	kfree(ec);
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

int __init acpi_boot_ec_enable(void)
{
	if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
		return 0;
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
	return -EFAULT;
}

static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};

/* Some BIOSes do not survive an early DSDT scan, skip it */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}

/* ASUSTek often supplies us with broken ECDTs, validate them */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}

/* MSI EC needs special treatment, enable it */
static int ec_flag_msi(const struct dmi_system_id *id)
{
	pr_debug("Detected MSI hardware, enabling workarounds.\n");
	EC_FLAGS_MSI = 1;
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}

/*
 * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
 * the GPE storm threshold back to 20
 */
static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
{
	pr_debug("Setting the EC GPE storm threshold to 20\n");
	ec_storm_threshold = 20;
	return 0;
}

/*
 * Acer EC firmware refuses to respond to QR_EC when SCI_EVT is not set.
 * In that case, we complete the QR_EC without issuing it to the firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=86211
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}

/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	return 0;
}

static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
	{
	ec_flag_msi, "Quanta hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
	DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
	{
	ec_flag_msi, "Quanta hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
	DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
	{
	ec_flag_msi, "Clevo W350etq", {
	DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."),
	DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_enlarge_storm_threshold, "CLEVO hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
	DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{
	ec_flag_query_handshake, "Acer hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
	{},
};
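/*
 * acpi_ec_ecdt_probe() sets up the boot EC before the namespace is fully
 * scanned.  It prefers the addresses from the ECDT; on DMI-flagged systems
 * the ECDT is cross-checked against (or replaced by) the EC description
 * found in the DSDT, and on boards known not to survive an early DSDT walk
 * that scan is skipped entirely.
 */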
int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table is found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		boot_ec->handle = ACPI_ROOT_OBJECT;
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust ECDT, which comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			return -ENOMEM;
		/* fall through */
	}

	if (EC_FLAGS_SKIP_DSDT_SCAN) {
		kfree(saved_ec);
		return -ENODEV;
	}

	/* This workaround is needed only on some broken machines,
	 * which require early EC, but fail to provide ECDT */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check whether acpi_get_devices actually found something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
				"ECDT tables, which are very hard to work around. "
				"Trying to use DSDT EC info instead. Please send "
				"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/* We really need to limit this workaround, the only ASUS
		 * machine that needs it has a fake EC._INI method, so use
		 * it as a flag.  Keep the boot_ec struct as it will be
		 * needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	kfree(saved_ec);
	boot_ec = NULL;
	return -ENODEV;
}

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};

int __init acpi_ec_init(void)
{
	int result = 0;

	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result < 0)
		return -ENODEV;

	return result;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
}
#endif	/* 0 */