hw-me.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/pci.h>
  17. #include <linux/kthread.h>
  18. #include <linux/interrupt.h>
  19. #include "mei_dev.h"
  20. #include "hbm.h"
  21. #include "hw-me.h"
  22. #include "hw-me-regs.h"
  23. /**
  24. * mei_me_reg_read - Reads 32bit data from the mei device
  25. *
  26. * @dev: the device structure
  27. * @offset: offset from which to read the data
  28. *
  29. * returns register value (u32)
  30. */
  31. static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
  32. unsigned long offset)
  33. {
  34. return ioread32(hw->mem_addr + offset);
  35. }
  36. /**
  37. * mei_me_reg_write - Writes 32bit data to the mei device
  38. *
  39. * @dev: the device structure
  40. * @offset: offset from which to write the data
  41. * @value: register value to write (u32)
  42. */
  43. static inline void mei_me_reg_write(const struct mei_me_hw *hw,
  44. unsigned long offset, u32 value)
  45. {
  46. iowrite32(value, hw->mem_addr + offset);
  47. }
  48. /**
  49. * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
  50. * read window register
  51. *
  52. * @dev: the device structure
  53. *
  54. * returns ME_CB_RW register value (u32)
  55. */
  56. static u32 mei_me_mecbrw_read(const struct mei_device *dev)
  57. {
  58. return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
  59. }
  60. /**
  61. * mei_me_mecsr_read - Reads 32bit data from the ME CSR
  62. *
  63. * @dev: the device structure
  64. *
  65. * returns ME_CSR_HA register value (u32)
  66. */
  67. static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
  68. {
  69. return mei_me_reg_read(hw, ME_CSR_HA);
  70. }
  71. /**
  72. * mei_hcsr_read - Reads 32bit data from the host CSR
  73. *
  74. * @dev: the device structure
  75. *
  76. * returns H_CSR register value (u32)
  77. */
  78. static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
  79. {
  80. return mei_me_reg_read(hw, H_CSR);
  81. }
  82. /**
  83. * mei_hcsr_set - writes H_CSR register to the mei device,
  84. * and ignores the H_IS bit for it is write-one-to-zero.
  85. *
  86. * @dev: the device structure
  87. */
  88. static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
  89. {
  90. hcsr &= ~H_IS;
  91. mei_me_reg_write(hw, H_CSR, hcsr);
  92. }
  93. /**
  94. * mei_me_hw_config - configure hw dependent settings
  95. *
  96. * @dev: mei device
  97. */
  98. static void mei_me_hw_config(struct mei_device *dev)
  99. {
  100. struct mei_me_hw *hw = to_me_hw(dev);
  101. u32 hcsr = mei_hcsr_read(to_me_hw(dev));
  102. /* Doesn't change in runtime */
  103. dev->hbuf_depth = (hcsr & H_CBD) >> 24;
  104. hw->pg_state = MEI_PG_OFF;
  105. }
  106. /**
  107. * mei_me_pg_state - translate internal pg state
  108. * to the mei power gating state
  109. *
  110. * @hw - me hardware
  111. * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
  112. */
  113. static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
  114. {
  115. struct mei_me_hw *hw = to_me_hw(dev);
  116. return hw->pg_state;
  117. }
  118. /**
  119. * mei_clear_interrupts - clear and stop interrupts
  120. *
  121. * @dev: the device structure
  122. */
  123. static void mei_me_intr_clear(struct mei_device *dev)
  124. {
  125. struct mei_me_hw *hw = to_me_hw(dev);
  126. u32 hcsr = mei_hcsr_read(hw);
  127. if ((hcsr & H_IS) == H_IS)
  128. mei_me_reg_write(hw, H_CSR, hcsr);
  129. }
  130. /**
  131. * mei_me_intr_enable - enables mei device interrupts
  132. *
  133. * @dev: the device structure
  134. */
  135. static void mei_me_intr_enable(struct mei_device *dev)
  136. {
  137. struct mei_me_hw *hw = to_me_hw(dev);
  138. u32 hcsr = mei_hcsr_read(hw);
  139. hcsr |= H_IE;
  140. mei_hcsr_set(hw, hcsr);
  141. }
  142. /**
  143. * mei_disable_interrupts - disables mei device interrupts
  144. *
  145. * @dev: the device structure
  146. */
  147. static void mei_me_intr_disable(struct mei_device *dev)
  148. {
  149. struct mei_me_hw *hw = to_me_hw(dev);
  150. u32 hcsr = mei_hcsr_read(hw);
  151. hcsr &= ~H_IE;
  152. mei_hcsr_set(hw, hcsr);
  153. }
  154. /**
  155. * mei_me_hw_reset_release - release device from the reset
  156. *
  157. * @dev: the device structure
  158. */
  159. static void mei_me_hw_reset_release(struct mei_device *dev)
  160. {
  161. struct mei_me_hw *hw = to_me_hw(dev);
  162. u32 hcsr = mei_hcsr_read(hw);
  163. hcsr |= H_IG;
  164. hcsr &= ~H_RST;
  165. mei_hcsr_set(hw, hcsr);
  166. /* complete this write before we set host ready on another CPU */
  167. mmiowb();
  168. }
  169. /**
  170. * mei_me_hw_reset - resets fw via mei csr register.
  171. *
  172. * @dev: the device structure
  173. * @intr_enable: if interrupt should be enabled after reset.
  174. */
  175. static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
  176. {
  177. struct mei_me_hw *hw = to_me_hw(dev);
  178. u32 hcsr = mei_hcsr_read(hw);
  179. hcsr |= H_RST | H_IG | H_IS;
  180. if (intr_enable)
  181. hcsr |= H_IE;
  182. else
  183. hcsr &= ~H_IE;
  184. dev->recvd_hw_ready = false;
  185. mei_me_reg_write(hw, H_CSR, hcsr);
  186. /*
  187. * Host reads the H_CSR once to ensure that the
  188. * posted write to H_CSR completes.
  189. */
  190. hcsr = mei_hcsr_read(hw);
  191. if ((hcsr & H_RST) == 0)
  192. dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr);
  193. if ((hcsr & H_RDY) == H_RDY)
  194. dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr);
  195. if (intr_enable == false)
  196. mei_me_hw_reset_release(dev);
  197. return 0;
  198. }
  199. /**
  200. * mei_me_host_set_ready - enable device
  201. *
  202. * @dev - mei device
  203. * returns bool
  204. */
  205. static void mei_me_host_set_ready(struct mei_device *dev)
  206. {
  207. struct mei_me_hw *hw = to_me_hw(dev);
  208. hw->host_hw_state = mei_hcsr_read(hw);
  209. hw->host_hw_state |= H_IE | H_IG | H_RDY;
  210. mei_hcsr_set(hw, hw->host_hw_state);
  211. }
  212. /**
  213. * mei_me_host_is_ready - check whether the host has turned ready
  214. *
  215. * @dev - mei device
  216. * returns bool
  217. */
  218. static bool mei_me_host_is_ready(struct mei_device *dev)
  219. {
  220. struct mei_me_hw *hw = to_me_hw(dev);
  221. hw->host_hw_state = mei_hcsr_read(hw);
  222. return (hw->host_hw_state & H_RDY) == H_RDY;
  223. }
  224. /**
  225. * mei_me_hw_is_ready - check whether the me(hw) has turned ready
  226. *
  227. * @dev - mei device
  228. * returns bool
  229. */
  230. static bool mei_me_hw_is_ready(struct mei_device *dev)
  231. {
  232. struct mei_me_hw *hw = to_me_hw(dev);
  233. hw->me_hw_state = mei_me_mecsr_read(hw);
  234. return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
  235. }
  236. static int mei_me_hw_ready_wait(struct mei_device *dev)
  237. {
  238. int err;
  239. mutex_unlock(&dev->device_lock);
  240. err = wait_event_interruptible_timeout(dev->wait_hw_ready,
  241. dev->recvd_hw_ready,
  242. mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
  243. mutex_lock(&dev->device_lock);
  244. if (!err && !dev->recvd_hw_ready) {
  245. if (!err)
  246. err = -ETIME;
  247. dev_err(&dev->pdev->dev,
  248. "wait hw ready failed. status = %d\n", err);
  249. return err;
  250. }
  251. dev->recvd_hw_ready = false;
  252. return 0;
  253. }
  254. static int mei_me_hw_start(struct mei_device *dev)
  255. {
  256. int ret = mei_me_hw_ready_wait(dev);
  257. if (ret)
  258. return ret;
  259. dev_dbg(&dev->pdev->dev, "hw is ready\n");
  260. mei_me_host_set_ready(dev);
  261. return ret;
  262. }
  263. /**
  264. * mei_hbuf_filled_slots - gets number of device filled buffer slots
  265. *
  266. * @dev: the device structure
  267. *
  268. * returns number of filled slots
  269. */
  270. static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
  271. {
  272. struct mei_me_hw *hw = to_me_hw(dev);
  273. char read_ptr, write_ptr;
  274. hw->host_hw_state = mei_hcsr_read(hw);
  275. read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
  276. write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
  277. return (unsigned char) (write_ptr - read_ptr);
  278. }
  279. /**
  280. * mei_me_hbuf_is_empty - checks if host buffer is empty.
  281. *
  282. * @dev: the device structure
  283. *
  284. * returns true if empty, false - otherwise.
  285. */
  286. static bool mei_me_hbuf_is_empty(struct mei_device *dev)
  287. {
  288. return mei_hbuf_filled_slots(dev) == 0;
  289. }
  290. /**
  291. * mei_me_hbuf_empty_slots - counts write empty slots.
  292. *
  293. * @dev: the device structure
  294. *
  295. * returns -EOVERFLOW if overflow, otherwise empty slots count
  296. */
  297. static int mei_me_hbuf_empty_slots(struct mei_device *dev)
  298. {
  299. unsigned char filled_slots, empty_slots;
  300. filled_slots = mei_hbuf_filled_slots(dev);
  301. empty_slots = dev->hbuf_depth - filled_slots;
  302. /* check for overflow */
  303. if (filled_slots > dev->hbuf_depth)
  304. return -EOVERFLOW;
  305. return empty_slots;
  306. }
  307. static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
  308. {
  309. return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
  310. }
  311. /**
  312. * mei_me_write_message - writes a message to mei device.
  313. *
  314. * @dev: the device structure
  315. * @header: mei HECI header of message
  316. * @buf: message payload will be written
  317. *
  318. * This function returns -EIO if write has failed
  319. */
  320. static int mei_me_write_message(struct mei_device *dev,
  321. struct mei_msg_hdr *header,
  322. unsigned char *buf)
  323. {
  324. struct mei_me_hw *hw = to_me_hw(dev);
  325. unsigned long rem;
  326. unsigned long length = header->length;
  327. u32 *reg_buf = (u32 *)buf;
  328. u32 hcsr;
  329. u32 dw_cnt;
  330. int i;
  331. int empty_slots;
  332. dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
  333. empty_slots = mei_hbuf_empty_slots(dev);
  334. dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
  335. dw_cnt = mei_data2slots(length);
  336. if (empty_slots < 0 || dw_cnt > empty_slots)
  337. return -EMSGSIZE;
  338. mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
  339. for (i = 0; i < length / 4; i++)
  340. mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
  341. rem = length & 0x3;
  342. if (rem > 0) {
  343. u32 reg = 0;
  344. memcpy(&reg, &buf[length - rem], rem);
  345. mei_me_reg_write(hw, H_CB_WW, reg);
  346. }
  347. hcsr = mei_hcsr_read(hw) | H_IG;
  348. mei_hcsr_set(hw, hcsr);
  349. if (!mei_me_hw_is_ready(dev))
  350. return -EIO;
  351. return 0;
  352. }
  353. /**
  354. * mei_me_count_full_read_slots - counts read full slots.
  355. *
  356. * @dev: the device structure
  357. *
  358. * returns -EOVERFLOW if overflow, otherwise filled slots count
  359. */
  360. static int mei_me_count_full_read_slots(struct mei_device *dev)
  361. {
  362. struct mei_me_hw *hw = to_me_hw(dev);
  363. char read_ptr, write_ptr;
  364. unsigned char buffer_depth, filled_slots;
  365. hw->me_hw_state = mei_me_mecsr_read(hw);
  366. buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
  367. read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
  368. write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
  369. filled_slots = (unsigned char) (write_ptr - read_ptr);
  370. /* check for overflow */
  371. if (filled_slots > buffer_depth)
  372. return -EOVERFLOW;
  373. dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
  374. return (int)filled_slots;
  375. }
  376. /**
  377. * mei_me_read_slots - reads a message from mei device.
  378. *
  379. * @dev: the device structure
  380. * @buffer: message buffer will be written
  381. * @buffer_length: message size will be read
  382. */
  383. static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
  384. unsigned long buffer_length)
  385. {
  386. struct mei_me_hw *hw = to_me_hw(dev);
  387. u32 *reg_buf = (u32 *)buffer;
  388. u32 hcsr;
  389. for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
  390. *reg_buf++ = mei_me_mecbrw_read(dev);
  391. if (buffer_length > 0) {
  392. u32 reg = mei_me_mecbrw_read(dev);
  393. memcpy(reg_buf, &reg, buffer_length);
  394. }
  395. hcsr = mei_hcsr_read(hw) | H_IG;
  396. mei_hcsr_set(hw, hcsr);
  397. return 0;
  398. }
  399. /**
  400. * mei_me_pg_enter - write pg enter register to mei device.
  401. *
  402. * @dev: the device structure
  403. */
  404. static void mei_me_pg_enter(struct mei_device *dev)
  405. {
  406. struct mei_me_hw *hw = to_me_hw(dev);
  407. u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
  408. reg |= H_HPG_CSR_PGI;
  409. mei_me_reg_write(hw, H_HPG_CSR, reg);
  410. }
  411. /**
  412. * mei_me_pg_enter - write pg enter register to mei device.
  413. *
  414. * @dev: the device structure
  415. */
  416. static void mei_me_pg_exit(struct mei_device *dev)
  417. {
  418. struct mei_me_hw *hw = to_me_hw(dev);
  419. u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
  420. WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
  421. reg |= H_HPG_CSR_PGIHEXR;
  422. mei_me_reg_write(hw, H_HPG_CSR, reg);
  423. }
  424. /**
  425. * mei_me_pg_set_sync - perform pg entry procedure
  426. *
  427. * @dev: the device structure
  428. *
  429. * returns 0 on success an error code otherwise
  430. */
  431. int mei_me_pg_set_sync(struct mei_device *dev)
  432. {
  433. struct mei_me_hw *hw = to_me_hw(dev);
  434. unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
  435. int ret;
  436. dev->pg_event = MEI_PG_EVENT_WAIT;
  437. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
  438. if (ret)
  439. return ret;
  440. mutex_unlock(&dev->device_lock);
  441. wait_event_timeout(dev->wait_pg,
  442. dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
  443. mutex_lock(&dev->device_lock);
  444. if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
  445. mei_me_pg_enter(dev);
  446. ret = 0;
  447. } else {
  448. ret = -ETIME;
  449. }
  450. dev->pg_event = MEI_PG_EVENT_IDLE;
  451. hw->pg_state = MEI_PG_ON;
  452. return ret;
  453. }
  454. /**
  455. * mei_me_pg_unset_sync - perform pg exit procedure
  456. *
  457. * @dev: the device structure
  458. *
  459. * returns 0 on success an error code otherwise
  460. */
  461. int mei_me_pg_unset_sync(struct mei_device *dev)
  462. {
  463. struct mei_me_hw *hw = to_me_hw(dev);
  464. unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
  465. int ret;
  466. if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
  467. goto reply;
  468. dev->pg_event = MEI_PG_EVENT_WAIT;
  469. mei_me_pg_exit(dev);
  470. mutex_unlock(&dev->device_lock);
  471. wait_event_timeout(dev->wait_pg,
  472. dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
  473. mutex_lock(&dev->device_lock);
  474. reply:
  475. if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
  476. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
  477. else
  478. ret = -ETIME;
  479. dev->pg_event = MEI_PG_EVENT_IDLE;
  480. hw->pg_state = MEI_PG_OFF;
  481. return ret;
  482. }
  483. /**
  484. * mei_me_pg_is_enabled - detect if PG is supported by HW
  485. *
  486. * @dev: the device structure
  487. *
  488. * returns: true is pg supported, false otherwise
  489. */
  490. static bool mei_me_pg_is_enabled(struct mei_device *dev)
  491. {
  492. struct mei_me_hw *hw = to_me_hw(dev);
  493. u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
  494. if ((reg & ME_PGIC_HRA) == 0)
  495. goto notsupported;
  496. if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
  497. goto notsupported;
  498. if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
  499. dev->version.minor_version < HBM_MINOR_VERSION_PGI)
  500. goto notsupported;
  501. return true;
  502. notsupported:
  503. dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
  504. !!(reg & ME_PGIC_HRA),
  505. dev->version.major_version,
  506. dev->version.minor_version,
  507. HBM_MAJOR_VERSION_PGI,
  508. HBM_MINOR_VERSION_PGI);
  509. return false;
  510. }
  511. /**
  512. * mei_me_irq_quick_handler - The ISR of the MEI device
  513. *
  514. * @irq: The irq number
  515. * @dev_id: pointer to the device structure
  516. *
  517. * returns irqreturn_t
  518. */
  519. irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
  520. {
  521. struct mei_device *dev = (struct mei_device *) dev_id;
  522. struct mei_me_hw *hw = to_me_hw(dev);
  523. u32 csr_reg = mei_hcsr_read(hw);
  524. if ((csr_reg & H_IS) != H_IS)
  525. return IRQ_NONE;
  526. /* clear H_IS bit in H_CSR */
  527. mei_me_reg_write(hw, H_CSR, csr_reg);
  528. return IRQ_WAKE_THREAD;
  529. }
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Runs with the device lock held for the whole body. Order matters:
 * ack (MSI case) -> reset check -> host-ready handshake -> read ->
 * write -> completion callbacks.
 *
 * returns irqreturn_t (always IRQ_HANDLED)
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(dev->pdev))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
		/* reset is done from process context, not here */
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			/* fw came up: release it from reset and wake the
			 * waiter in mei_me_hw_ready_wait() */
			mei_me_hw_reset_release(dev);
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");

			dev->recvd_hw_ready = true;
			wake_up_interruptible(&dev->wait_hw_ready);
		} else {
			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg state is not idle
	 */
	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
		rets = mei_irq_write_handler(dev, &complete_list);
		/* write handler may have freed slots; re-sample readiness */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
  606. /**
  607. * mei_me_fw_status - retrieve fw status from the pci config space
  608. *
  609. * @dev: the device structure
  610. * @fw_status: fw status registers storage
  611. *
  612. * returns 0 on success an error code otherwise
  613. */
  614. static int mei_me_fw_status(struct mei_device *dev,
  615. struct mei_fw_status *fw_status)
  616. {
  617. const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
  618. int i;
  619. if (!fw_status)
  620. return -EINVAL;
  621. switch (dev->pdev->device) {
  622. case MEI_DEV_ID_IBXPK_1:
  623. case MEI_DEV_ID_IBXPK_2:
  624. case MEI_DEV_ID_CPT_1:
  625. case MEI_DEV_ID_PBG_1:
  626. case MEI_DEV_ID_PPT_1:
  627. case MEI_DEV_ID_PPT_2:
  628. case MEI_DEV_ID_PPT_3:
  629. case MEI_DEV_ID_LPT_H:
  630. case MEI_DEV_ID_LPT_W:
  631. case MEI_DEV_ID_LPT_LP:
  632. case MEI_DEV_ID_LPT_HR:
  633. case MEI_DEV_ID_WPT_LP:
  634. fw_status->count = 2;
  635. break;
  636. case MEI_DEV_ID_ICH10_1:
  637. case MEI_DEV_ID_ICH10_2:
  638. case MEI_DEV_ID_ICH10_3:
  639. case MEI_DEV_ID_ICH10_4:
  640. fw_status->count = 1;
  641. break;
  642. default:
  643. fw_status->count = 0;
  644. break;
  645. }
  646. for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
  647. int ret;
  648. ret = pci_read_config_dword(dev->pdev,
  649. pci_cfg_reg[i], &fw_status->status[i]);
  650. if (ret)
  651. return ret;
  652. }
  653. return 0;
  654. }
/* me hardware ops vtable plugged into the generic mei layer */
static const struct mei_hw_ops mei_me_hw_ops = {

	.pg_state  = mei_me_pg_state,

	.fw_status = mei_me_fw_status,

	/* readiness / lifecycle */
	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt control */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	/* host buffer (tx) */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	/* read buffer (rx) */
	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
  675. static bool mei_me_fw_type_nm(struct pci_dev *pdev)
  676. {
  677. u32 reg;
  678. pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
  679. /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
  680. return (reg & 0x600) == 0x200;
  681. }
  682. #define MEI_CFG_FW_NM \
  683. .quirk_probe = mei_me_fw_type_nm
  684. static bool mei_me_fw_type_sps(struct pci_dev *pdev)
  685. {
  686. u32 reg;
  687. /* Read ME FW Status check for SPS Firmware */
  688. pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
  689. /* if bits [19:16] = 15, running SPS Firmware */
  690. return (reg & 0xf0000) == 0xf0000;
  691. }
  692. #define MEI_CFG_FW_SPS \
  693. .quirk_probe = mei_me_fw_type_sps
/* legacy parts expose no fw status registers */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* ICH parts expose one fw status register (HFS_1) */
#define MEI_CFG_ICH_HFS                        \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH parts expose two fw status registers (HFS_1, HFS_2) */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2
/* Per-generation configuration tables, selected by the pci id table. */

/* ICH Legacy devices: no fw status registers */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices: one fw status register */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices: two fw status registers */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};
  725. /**
  726. * mei_me_dev_init - allocates and initializes the mei device structure
  727. *
  728. * @pdev: The pci device structure
  729. * @cfg: per device generation config
  730. *
  731. * returns The mei_device_device pointer on success, NULL on failure.
  732. */
  733. struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
  734. const struct mei_cfg *cfg)
  735. {
  736. struct mei_device *dev;
  737. dev = kzalloc(sizeof(struct mei_device) +
  738. sizeof(struct mei_me_hw), GFP_KERNEL);
  739. if (!dev)
  740. return NULL;
  741. mei_device_init(dev, cfg);
  742. dev->ops = &mei_me_hw_ops;
  743. dev->pdev = pdev;
  744. return dev;
  745. }