hw-me.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/pci.h>
  17. #include <linux/kthread.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/pm_runtime.h>
  20. #include <linux/sizes.h>
  21. #include "mei_dev.h"
  22. #include "hbm.h"
  23. #include "hw-me.h"
  24. #include "hw-me-regs.h"
  25. #include "mei-trace.h"
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
				  unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				    unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}
/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	/* every CSR access is traced for debugging */
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

	return reg;
}
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

	return reg;
}
/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	/*
	 * the H_IS bits are write-one-to-clear; mask them out so a
	 * plain CSR update does not accidentally ack a pending interrupt
	 */
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
/**
 * mei_hcsr_set_hig - set host interrupt (set H_IG)
 *
 * @dev: the device structure
 */
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
	u32 hcsr;

	hcsr = mei_hcsr_read(dev) | H_IG;
	mei_hcsr_set(dev, hcsr);
}
/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);

	return reg;
}
/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	/* per-platform list of fw status registers in pci config space */
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev, fw_src->status[i],
					    &fw_status->status[i]);
		/* NOTE(review): the value is traced before ret is checked,
		 * so status[i] may be stale when the config read failed */
		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
				       fw_src->status[i],
				       fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */
static void mei_me_hw_config(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	/* Doesn't change in runtime */
	hcsr = mei_hcsr_read(dev);
	/* circular buffer depth lives in bits 31:24 of H_CSR */
	hw->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	/* d0i3 is supported only when all bits of the mask are set */
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		/* the device may already be power gated at probe time */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}
}
/**
 * mei_me_pg_state - translate internal pg state
 *  to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}
/**
 * me_intr_src - extract the interrupt source bits from a host CSR value
 *
 * @hcsr: host CSR register value
 *
 * Return: the H_CSR interrupt-status bits; non-zero if an interrupt
 *         is pending
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
/**
 * me_intr_disable - disables mei device interrupts
 *  using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}
/**
 * me_intr_clear - clear (acknowledge) pending interrupts
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	/*
	 * writing the hcsr value back with its H_IS bits still set
	 * acks the pending interrupts (write-one-to-clear)
	 */
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_clear(dev, hcsr);
}
/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}
/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_disable(dev, hcsr);
}
/**
 * mei_me_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_me_synchronize_irq(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	synchronize_irq(pdev->irq);
}
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}
/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	/* enable interrupts, raise the interrupt-generate bit
	 * and declare the host side ready in one CSR write */
	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
	mei_hcsr_set(dev, hcsr);
}
  321. /**
  322. * mei_me_host_is_ready - check whether the host has turned ready
  323. *
  324. * @dev: mei device
  325. * Return: bool
  326. */
  327. static bool mei_me_host_is_ready(struct mei_device *dev)
  328. {
  329. u32 hcsr = mei_hcsr_read(dev);
  330. return (hcsr & H_RDY) == H_RDY;
  331. }
  332. /**
  333. * mei_me_hw_is_ready - check whether the me(hw) has turned ready
  334. *
  335. * @dev: mei device
  336. * Return: bool
  337. */
  338. static bool mei_me_hw_is_ready(struct mei_device *dev)
  339. {
  340. u32 mecsr = mei_me_mecsr_read(dev);
  341. return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
  342. }
  343. /**
  344. * mei_me_hw_is_resetting - check whether the me(hw) is in reset
  345. *
  346. * @dev: mei device
  347. * Return: bool
  348. */
  349. static bool mei_me_hw_is_resetting(struct mei_device *dev)
  350. {
  351. u32 mecsr = mei_me_mecsr_read(dev);
  352. return (mecsr & ME_RST_HRA) == ME_RST_HRA;
  353. }
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the device lock while sleeping so the interrupt
	 * thread can set recvd_hw_ready and wake us */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	/* consume the ready event for the next reset cycle */
	dev->recvd_hw_ready = false;
	return 0;
}
  376. /**
  377. * mei_me_hw_start - hw start routine
  378. *
  379. * @dev: mei device
  380. * Return: 0 on success, error otherwise
  381. */
  382. static int mei_me_hw_start(struct mei_device *dev)
  383. {
  384. int ret = mei_me_hw_ready_wait(dev);
  385. if (ret)
  386. return ret;
  387. dev_dbg(dev->dev, "hw is ready\n");
  388. mei_me_host_set_ready(dev);
  389. return ret;
  390. }
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);
	/* read pointer in bits 15:8, write pointer in bits 23:16 */
	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);

	/* signed char subtraction handles circular-buffer wrap-around */
	return (unsigned char) (write_ptr - read_ptr);
}
  407. /**
  408. * mei_me_hbuf_is_empty - checks if host buffer is empty.
  409. *
  410. * @dev: the device structure
  411. *
  412. * Return: true if empty, false - otherwise.
  413. */
  414. static bool mei_me_hbuf_is_empty(struct mei_device *dev)
  415. {
  416. return mei_hbuf_filled_slots(dev) == 0;
  417. }
/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	/* may wrap below zero (unsigned); guarded by the check below */
	empty_slots = hw->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > hw->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}
/**
 * mei_me_hbuf_depth - returns depth of the hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in slots
 */
static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->hbuf_depth;
}
  448. /**
  449. * mei_me_hbuf_write - writes a message to host hw buffer.
  450. *
  451. * @dev: the device structure
  452. * @hdr: header of message
  453. * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
  454. * @data: payload
  455. * @data_len: payload length in bytes
  456. *
  457. * Return: 0 if success, < 0 - otherwise.
  458. */
  459. static int mei_me_hbuf_write(struct mei_device *dev,
  460. const void *hdr, size_t hdr_len,
  461. const void *data, size_t data_len)
  462. {
  463. unsigned long rem;
  464. unsigned long i;
  465. const u32 *reg_buf;
  466. u32 dw_cnt;
  467. int empty_slots;
  468. if (WARN_ON(!hdr || !data || hdr_len & 0x3))
  469. return -EINVAL;
  470. dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
  471. empty_slots = mei_hbuf_empty_slots(dev);
  472. dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
  473. if (empty_slots < 0)
  474. return -EOVERFLOW;
  475. dw_cnt = mei_data2slots(hdr_len + data_len);
  476. if (dw_cnt > (u32)empty_slots)
  477. return -EMSGSIZE;
  478. reg_buf = hdr;
  479. for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
  480. mei_me_hcbww_write(dev, reg_buf[i]);
  481. reg_buf = data;
  482. for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
  483. mei_me_hcbww_write(dev, reg_buf[i]);
  484. rem = data_len & 0x3;
  485. if (rem > 0) {
  486. u32 reg = 0;
  487. memcpy(&reg, (const u8 *)data + data_len - rem, rem);
  488. mei_me_hcbww_write(dev, reg);
  489. }
  490. mei_hcsr_set_hig(dev);
  491. if (!mei_me_hw_is_ready(dev))
  492. return -EIO;
  493. return 0;
  494. }
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	/* depth in bits 31:24, read ptr 15:8, write ptr 23:16 */
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	/* signed char subtraction handles circular-buffer wrap-around */
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
			     unsigned long buffer_length)
{
	u32 *reg_buf = (u32 *)buffer;

	/* drain whole 32bit slots from the read window */
	for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* trailing bytes (< 4): read one more slot, copy only the rest */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	/* ack to the hw that the buffer was consumed */
	mei_hcsr_set_hig(dev);
	return 0;
}
/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	reg |= H_HPG_CSR_PGI;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	/* exiting pg without having entered it is unexpected */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* sleep without the device lock so the irq thread can
	 * deliver the pg response and flip pg_event */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* NOTE(review): pg_state is marked ON even on timeout */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the exit notification may already have arrived via the irq thread */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_unset(dev);

	/* drop the lock while waiting for the pg exit notification */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		/* NOTE(review): this early return leaves pg_event at
		 * INTR_WAIT, skipping the reset to IDLE done at "out" */
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
  643. /**
  644. * mei_me_pg_in_transition - is device now in pg transition
  645. *
  646. * @dev: the device structure
  647. *
  648. * Return: true if in pg transition, false otherwise
  649. */
  650. static bool mei_me_pg_in_transition(struct mei_device *dev)
  651. {
  652. return dev->pg_event >= MEI_PG_EVENT_WAIT &&
  653. dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
  654. }
/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true is pg supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_mecsr_read(dev);

	/* d0i3-capable hardware is always pg capable */
	if (hw->d0i3_supported)
		return true;

	/* legacy pg requires both hw capability and hbm protocol support */
	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
		hw->d0i3_supported,
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}
/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg &= ~H_D0I3C_I3;
	/* always ask for an interrupt on d0i3 exit */
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);
	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	/* drop the lock while waiting for the pg response */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* CIP clear means the set completed immediately */
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* on timeout check the register directly: if I3 got
		 * set anyway the entry is treated as successful */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: always 0
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	/* request d0i3 without interrupt notification */
	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		/* CIP clear means the unset completed immediately */
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	/* drop the lock while waiting for the d0i3 exit interrupt */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* on timeout check the register directly: if I3 is
		 * already clear the exit is treated as successful */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *   in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* only relevant while a pg exit is waiting for this interrupt */
	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* complete a pending host-initiated d0i3 enter/exit handshake */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			/* we were power gated: this interrupt completes
			 * the d0i3 exit started by mei_me_d0i3_exit_sync()
			 */
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			/* otherwise this completes a d0i3 enter */
			hw->pg_state = MEI_PG_ON;
		}

		/* wake the sync waiter blocked on the handshake */
		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
  909. /**
  910. * mei_me_pg_intr - perform pg processing in interrupt thread handler
  911. *
  912. * @dev: the device structure
  913. * @intr_source: interrupt source
  914. */
  915. static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
  916. {
  917. struct mei_me_hw *hw = to_me_hw(dev);
  918. if (hw->d0i3_supported)
  919. mei_me_d0i3_intr(dev, intr_source);
  920. else
  921. mei_me_pg_legacy_intr(dev);
  922. }
  923. /**
  924. * mei_me_pg_enter_sync - perform runtime pm entry procedure
  925. *
  926. * @dev: the device structure
  927. *
  928. * Return: 0 on success an error code otherwise
  929. */
  930. int mei_me_pg_enter_sync(struct mei_device *dev)
  931. {
  932. struct mei_me_hw *hw = to_me_hw(dev);
  933. if (hw->d0i3_supported)
  934. return mei_me_d0i3_enter_sync(dev);
  935. else
  936. return mei_me_pg_legacy_enter_sync(dev);
  937. }
  938. /**
  939. * mei_me_pg_exit_sync - perform runtime pm exit procedure
  940. *
  941. * @dev: the device structure
  942. *
  943. * Return: 0 on success an error code otherwise
  944. */
  945. int mei_me_pg_exit_sync(struct mei_device *dev)
  946. {
  947. struct mei_me_hw *hw = to_me_hw(dev);
  948. if (hw->d0i3_supported)
  949. return mei_me_d0i3_exit_sync(dev);
  950. else
  951. return mei_me_pg_legacy_exit_sync(dev);
  952. }
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		if (hw->d0i3_supported) {
			/* d0i3 exit is an interrupt-driven handshake, so
			 * interrupts must be enabled before it starts
			 */
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset and interrupt generation; setting H_CSR_IS_MASK
	 * presumably clears pending interrupt status (write-1-to-clear)
	 * -- confirm against the H_CSR register layout
	 */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		/* interrupts stay off: release the reset and park the
		 * device in d0i3; NOTE(review): presumably the
		 * power-down/shutdown path -- confirm with callers
		 */
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
  1011. /**
  1012. * mei_me_irq_quick_handler - The ISR of the MEI device
  1013. *
  1014. * @irq: The irq number
  1015. * @dev_id: pointer to the device structure
  1016. *
  1017. * Return: irqreturn_t
  1018. */
  1019. irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
  1020. {
  1021. struct mei_device *dev = (struct mei_device *)dev_id;
  1022. u32 hcsr;
  1023. hcsr = mei_hcsr_read(dev);
  1024. if (!me_intr_src(hcsr))
  1025. return IRQ_NONE;
  1026. dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
  1027. /* disable interrupts on device */
  1028. me_intr_disable(dev, hcsr);
  1029. return IRQ_WAKE_THREAD;
  1030. }
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* acknowledge the interrupt sources latched in H_CSR */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	/* let the power-gating state machine consume its interrupt bits */
	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}
	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		/* a read error during reset/power-down is expected and
		 * must not re-trigger a reset
		 */
		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-enable the interrupts that the quick handler disabled */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
/* hardware ops vtable the mei core uses to drive the ME interface */
static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state  = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_depth = mei_me_hbuf_depth,

	.write = mei_me_hbuf_write,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
  1134. static bool mei_me_fw_type_nm(struct pci_dev *pdev)
  1135. {
  1136. u32 reg;
  1137. pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
  1138. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
  1139. /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
  1140. return (reg & 0x600) == 0x200;
  1141. }
/* probe quirk used by configs that must exclude Node Manager firmware */
#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm
  1144. static bool mei_me_fw_type_sps(struct pci_dev *pdev)
  1145. {
  1146. u32 reg;
  1147. unsigned int devfn;
  1148. /*
  1149. * Read ME FW Status register to check for SPS Firmware
  1150. * The SPS FW is only signaled in pci function 0
  1151. */
  1152. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1153. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
  1154. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  1155. /* if bits [19:16] = 15, running SPS Firmware */
  1156. return (reg & 0xf0000) == 0xf0000;
  1157. }
/* probe quirk used by configs that must exclude SPS firmware */
#define MEI_CFG_FW_SPS                          \
	.quirk_probe = mei_me_fw_type_sps

/* ICH: no firmware status registers */
#define MEI_CFG_ICH_HFS                      \
	.fw_status.count = 0

/* ICH10: single firmware status register */
#define MEI_CFG_ICH10_HFS                      \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH: two firmware status registers */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* PCH8 and newer: all six firmware status registers */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6

/* DMA sizes: 128K host and device buffers, one page for control */
#define MEI_CFG_DMA_128 \
	.dma_size[DMA_DSCR_HOST] = SZ_128K, \
	.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
	.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH devices */
static const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};

/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_DMA_128,
};
/*
 * mei_cfg_list - A list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
	[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
};
  1226. const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
  1227. {
  1228. BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
  1229. if (idx >= MEI_ME_NUM_CFG)
  1230. return NULL;
  1231. return mei_cfg_list[idx];
  1232. };
  1233. /**
  1234. * mei_me_dev_init - allocates and initializes the mei device structure
  1235. *
  1236. * @pdev: The pci device structure
  1237. * @cfg: per device generation config
  1238. *
  1239. * Return: The mei_device pointer on success, NULL on failure.
  1240. */
  1241. struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
  1242. const struct mei_cfg *cfg)
  1243. {
  1244. struct mei_device *dev;
  1245. struct mei_me_hw *hw;
  1246. dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
  1247. sizeof(struct mei_me_hw), GFP_KERNEL);
  1248. if (!dev)
  1249. return NULL;
  1250. hw = to_me_hw(dev);
  1251. mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
  1252. hw->cfg = cfg;
  1253. return dev;
  1254. }