hw-me.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/pci.h>
  17. #include <linux/kthread.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/pm_runtime.h>
  20. #include "mei_dev.h"
  21. #include "hbm.h"
  22. #include "hw-me.h"
  23. #include "hw-me-regs.h"
  24. #include "mei-trace.h"
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
				  unsigned long offset)
{
	/* raw MMIO read from the device's memory-mapped register block */
	return ioread32(hw->mem_addr + offset);
}
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				    unsigned long offset, u32 value)
{
	/* raw MMIO write into the device's memory-mapped register block */
	iowrite32(value, hw->mem_addr + offset);
}
/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *	read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
	mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}
/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
	/* every CSR access is traced for debugging register-level flows */
	trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
	return reg;
}
/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
	trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
	return reg;
}
/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	/* trace before the write so the log order matches the bus order */
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 *	and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
	/* H_IS bits are write-one-to-clear; mask them out so a plain
	 * read-modify-write does not accidentally acknowledge interrupts */
	reg &= ~H_CSR_IS_MASK;
	mei_hcsr_write(dev, reg);
}
  123. /**
  124. * mei_hcsr_set_hig - set host interrupt (set H_IG)
  125. *
  126. * @dev: the device structure
  127. */
  128. static inline void mei_hcsr_set_hig(struct mei_device *dev)
  129. {
  130. u32 hcsr;
  131. hcsr = mei_hcsr_read(dev) | H_IG;
  132. mei_hcsr_set(dev, hcsr);
  133. }
/**
 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
 *
 * @dev: the device structure
 *
 * Return: H_D0I3C register value (u32)
 */
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
	u32 reg;

	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
	return reg;
}
/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
  159. /**
  160. * mei_me_fw_status - read fw status register from pci config space
  161. *
  162. * @dev: mei device
  163. * @fw_status: fw status register values
  164. *
  165. * Return: 0 on success, error otherwise
  166. */
  167. static int mei_me_fw_status(struct mei_device *dev,
  168. struct mei_fw_status *fw_status)
  169. {
  170. struct pci_dev *pdev = to_pci_dev(dev->dev);
  171. struct mei_me_hw *hw = to_me_hw(dev);
  172. const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
  173. int ret;
  174. int i;
  175. if (!fw_status)
  176. return -EINVAL;
  177. fw_status->count = fw_src->count;
  178. for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
  179. ret = pci_read_config_dword(pdev, fw_src->status[i],
  180. &fw_status->status[i]);
  181. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
  182. fw_src->status[i],
  183. fw_status->status[i]);
  184. if (ret)
  185. return ret;
  186. }
  187. return 0;
  188. }
/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */
static void mei_me_hw_config(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr, reg;

	/* Doesn't change in runtime */
	hcsr = mei_hcsr_read(dev);
	/* circular buffer depth is stored at bit offset 24 of H_CSR */
	dev->hbuf_depth = (hcsr & H_CBD) >> 24;

	reg = 0;
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
	/* d0i3 capability requires the full mask to be set, not just any bit */
	hw->d0i3_supported =
		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);

	hw->pg_state = MEI_PG_OFF;
	if (hw->d0i3_supported) {
		reg = mei_me_d0i3c_read(dev);
		/* the device may already be in D0i3; mirror that in pg_state */
		if (reg & H_D0I3C_I3)
			hw->pg_state = MEI_PG_ON;
	}
}
  214. /**
  215. * mei_me_pg_state - translate internal pg state
  216. * to the mei power gating state
  217. *
  218. * @dev: mei device
  219. *
  220. * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
  221. */
  222. static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
  223. {
  224. struct mei_me_hw *hw = to_me_hw(dev);
  225. return hw->pg_state;
  226. }
/**
 * me_intr_src - extract the interrupt source bits from a host csr value
 *
 * @hcsr: H_CSR register value
 *
 * Return: the H_CSR_IS_MASK subset of @hcsr; non-zero means an
 *	   interrupt source is pending
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
/**
 * me_intr_disable - disables mei device interrupts
 *	using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
	/* clear interrupt-enable bits; mei_hcsr_set() also masks the
	 * write-one-to-clear status bits */
	hcsr &= ~H_CSR_IE_MASK;
	mei_hcsr_set(dev, hcsr);
}
/**
 * me_intr_clear - clear interrupt sources
 *	using supplied hcsr register value.
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	/* H_IS bits are write-one-to-clear: writing back the value read
	 * acknowledges exactly the pending sources */
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}
/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	me_intr_clear(dev, hcsr);
}
  264. /**
  265. * mei_me_intr_enable - enables mei device interrupts
  266. *
  267. * @dev: the device structure
  268. */
  269. static void mei_me_intr_enable(struct mei_device *dev)
  270. {
  271. u32 hcsr = mei_hcsr_read(dev);
  272. hcsr |= H_CSR_IE_MASK;
  273. mei_hcsr_set(dev, hcsr);
  274. }
  275. /**
  276. * mei_me_intr_disable - disables mei device interrupts
  277. *
  278. * @dev: the device structure
  279. */
  280. static void mei_me_intr_disable(struct mei_device *dev)
  281. {
  282. u32 hcsr = mei_hcsr_read(dev);
  283. me_intr_disable(dev, hcsr);
  284. }
/**
 * mei_me_synchronize_irq - wait for pending IRQ handlers
 *
 * @dev: the device structure
 */
static void mei_me_synchronize_irq(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* blocks until any in-flight handler for this irq line finishes */
	synchronize_irq(pdev->irq);
}
/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	u32 hcsr = mei_hcsr_read(dev);

	/* raise interrupt-generate and drop the reset bit in one write */
	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(dev, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}
  309. /**
  310. * mei_me_host_set_ready - enable device
  311. *
  312. * @dev: mei device
  313. */
  314. static void mei_me_host_set_ready(struct mei_device *dev)
  315. {
  316. u32 hcsr = mei_hcsr_read(dev);
  317. hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
  318. mei_hcsr_set(dev, hcsr);
  319. }
  320. /**
  321. * mei_me_host_is_ready - check whether the host has turned ready
  322. *
  323. * @dev: mei device
  324. * Return: bool
  325. */
  326. static bool mei_me_host_is_ready(struct mei_device *dev)
  327. {
  328. u32 hcsr = mei_hcsr_read(dev);
  329. return (hcsr & H_RDY) == H_RDY;
  330. }
  331. /**
  332. * mei_me_hw_is_ready - check whether the me(hw) has turned ready
  333. *
  334. * @dev: mei device
  335. * Return: bool
  336. */
  337. static bool mei_me_hw_is_ready(struct mei_device *dev)
  338. {
  339. u32 mecsr = mei_me_mecsr_read(dev);
  340. return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
  341. }
  342. /**
  343. * mei_me_hw_is_resetting - check whether the me(hw) is in reset
  344. *
  345. * @dev: mei device
  346. * Return: bool
  347. */
  348. static bool mei_me_hw_is_resetting(struct mei_device *dev)
  349. {
  350. u32 mecsr = mei_me_mecsr_read(dev);
  351. return (mecsr & ME_RST_HRA) == ME_RST_HRA;
  352. }
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *	or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, -ETIME on timeout
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop device_lock while sleeping; dev->recvd_hw_ready is
	 * presumably set by the interrupt path (not shown here) which
	 * also wakes wait_hw_ready */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	/* hw signalled readiness - take it out of reset and consume the flag */
	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}
  375. /**
  376. * mei_me_hw_start - hw start routine
  377. *
  378. * @dev: mei device
  379. * Return: 0 on success, error otherwise
  380. */
  381. static int mei_me_hw_start(struct mei_device *dev)
  382. {
  383. int ret = mei_me_hw_ready_wait(dev);
  384. if (ret)
  385. return ret;
  386. dev_dbg(dev->dev, "hw is ready\n");
  387. mei_me_host_set_ready(dev);
  388. return ret;
  389. }
/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	u32 hcsr;
	char read_ptr, write_ptr;

	hcsr = mei_hcsr_read(dev);
	/* read/write pointers live in bit fields of H_CSR */
	read_ptr = (char) ((hcsr & H_CBRP) >> 8);
	write_ptr = (char) ((hcsr & H_CBWP) >> 16);
	/* narrow subtraction is intentional: the difference is taken
	 * modulo 256 so pointer wrap-around is handled for free */
	return (unsigned char) (write_ptr - read_ptr);
}
  406. /**
  407. * mei_me_hbuf_is_empty - checks if host buffer is empty.
  408. *
  409. * @dev: the device structure
  410. *
  411. * Return: true if empty, false - otherwise.
  412. */
  413. static bool mei_me_hbuf_is_empty(struct mei_device *dev)
  414. {
  415. return mei_hbuf_filled_slots(dev) == 0;
  416. }
  417. /**
  418. * mei_me_hbuf_empty_slots - counts write empty slots.
  419. *
  420. * @dev: the device structure
  421. *
  422. * Return: -EOVERFLOW if overflow, otherwise empty slots count
  423. */
  424. static int mei_me_hbuf_empty_slots(struct mei_device *dev)
  425. {
  426. unsigned char filled_slots, empty_slots;
  427. filled_slots = mei_hbuf_filled_slots(dev);
  428. empty_slots = dev->hbuf_depth - filled_slots;
  429. /* check for overflow */
  430. if (filled_slots > dev->hbuf_depth)
  431. return -EOVERFLOW;
  432. return empty_slots;
  433. }
/**
 * mei_me_hbuf_max_len - returns size of hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in bytes
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
	/* payload capacity: buffer depth in dwords minus the space
	 * consumed by the message header */
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
  445. /**
  446. * mei_me_hbuf_write - writes a message to host hw buffer.
  447. *
  448. * @dev: the device structure
  449. * @header: mei HECI header of message
  450. * @buf: message payload will be written
  451. *
  452. * Return: -EIO if write has failed
  453. */
  454. static int mei_me_hbuf_write(struct mei_device *dev,
  455. struct mei_msg_hdr *header,
  456. const unsigned char *buf)
  457. {
  458. unsigned long rem;
  459. unsigned long length = header->length;
  460. u32 *reg_buf = (u32 *)buf;
  461. u32 dw_cnt;
  462. int i;
  463. int empty_slots;
  464. dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
  465. empty_slots = mei_hbuf_empty_slots(dev);
  466. dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
  467. dw_cnt = mei_data2slots(length);
  468. if (empty_slots < 0 || dw_cnt > empty_slots)
  469. return -EMSGSIZE;
  470. mei_me_hcbww_write(dev, *((u32 *) header));
  471. for (i = 0; i < length / 4; i++)
  472. mei_me_hcbww_write(dev, reg_buf[i]);
  473. rem = length & 0x3;
  474. if (rem > 0) {
  475. u32 reg = 0;
  476. memcpy(&reg, &buf[length - rem], rem);
  477. mei_me_hcbww_write(dev, reg);
  478. }
  479. mei_hcsr_set_hig(dev);
  480. if (!mei_me_hw_is_ready(dev))
  481. return -EIO;
  482. return 0;
  483. }
/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	u32 me_csr;
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	me_csr = mei_me_mecsr_read(dev);
	/* depth and pointers are bit fields of the ME CSR */
	buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
	/* modulo-256 difference handles pointer wrap-around */
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}
/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer will be written
 * @buffer_length: message size will be read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
		unsigned long buffer_length)
{
	u32 *reg_buf = (u32 *)buffer;

	/* drain whole dwords from the circular buffer read window */
	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

	/* a trailing partial dword is read once and copied byte-wise */
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	/* signal the hw that the host consumed the data */
	mei_hcsr_set_hig(dev);
	return 0;
}
/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	/* set the power-gating-initiate bit */
	reg |= H_HPG_CSR_PGI;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_reg_read(hw, H_HPG_CSR);
	trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

	/* exiting pg only makes sense if pg was actually initiated */
	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;

	trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* request isolation entry from the fw via hbm */
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* sleep unlocked until the response flips pg_event to RECEIVED
	 * (done elsewhere, presumably in the hbm response path) */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		/* fw agreed - latch the pg-enter bit in hw */
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	/* note: pg_state is marked ON even on timeout - existing behavior */
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* a pg-exit notification may have already arrived */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	/* clear the pg-enter bit in hw, then wait for the fw event */
	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	/* acknowledge the exit and wait for the confirming interrupt */
	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		/* note: returns without resetting pg_event/pg_state */
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	/* relies on the MEI_PG_EVENT_* enum ordering: the values between
	 * WAIT and INTR_WAIT inclusive are the transitional states */
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
  644. /**
  645. * mei_me_pg_is_enabled - detect if PG is supported by HW
  646. *
  647. * @dev: the device structure
  648. *
  649. * Return: true is pg supported, false otherwise
  650. */
  651. static bool mei_me_pg_is_enabled(struct mei_device *dev)
  652. {
  653. struct mei_me_hw *hw = to_me_hw(dev);
  654. u32 reg = mei_me_mecsr_read(dev);
  655. if (hw->d0i3_supported)
  656. return true;
  657. if ((reg & ME_PGIC_HRA) == 0)
  658. goto notsupported;
  659. if (!dev->hbm_f_pg_supported)
  660. goto notsupported;
  661. return true;
  662. notsupported:
  663. dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
  664. hw->d0i3_supported,
  665. !!(reg & ME_PGIC_HRA),
  666. dev->version.major_version,
  667. dev->version.minor_version,
  668. HBM_MAJOR_VERSION_PGI,
  669. HBM_MINOR_VERSION_PGI);
  670. return false;
  671. }
/**
 * mei_me_d0i3_set - write d0i3 register bit on mei device.
 *
 * @dev: the device structure
 * @intr: ask for interrupt
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
	u32 reg = mei_me_d0i3c_read(dev);

	reg |= H_D0I3C_I3;
	/* request an interrupt on completion only when asked to */
	if (intr)
		reg |= H_D0I3C_IR;
	else
		reg &= ~H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);

	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
/**
 * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
 *
 * @dev: the device structure
 *
 * Return: D0I3C register value
 */
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
	u32 reg = mei_me_d0i3c_read(dev);

	/* clear the d0i3 bit and always ask for a completion interrupt */
	reg &= ~H_D0I3C_I3;
	reg |= H_D0I3C_IR;
	mei_me_d0i3c_write(dev, reg);

	/* read it to ensure HW consistency */
	reg = mei_me_d0i3c_read(dev);
	return reg;
}
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;

	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	/* sleep unlocked until the fw response sets pg_event to RECEIVED */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	/* set the d0i3 bit, requesting a completion interrupt */
	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* no change-in-progress: the transition completed at once */
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* even without the interrupt, the hw may have entered d0i3:
		 * trust the register over the missing event */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
/**
 * mei_me_d0i3_enter - perform d0i3 entry procedure
 *   no hbm PG handshake
 *   no waiting for confirmation; runs with interrupts
 *   disabled
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
		goto on;
	}

	/* fire-and-forget: no completion interrupt requested */
	mei_me_d0i3_set(dev, false);
on:
	hw->pg_state = MEI_PG_ON;
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter\n");
	return 0;
}
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	/* clear the d0i3 bit; completion is signalled by interrupt */
	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		/* no change-in-progress: the transition completed at once */
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* even without the interrupt, the hw may have left d0i3:
		 * trust the register over the missing event */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;

	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *	in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* only relevant while a sync routine is waiting for this event */
	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	/* avoid a needless wake_up when nobody sleeps on the queue */
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}
  861. /**
  862. * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
  863. *
  864. * @dev: the device structure
  865. * @intr_source: interrupt source
  866. */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* complete a pending d0i3 enter/exit handshake someone waits on */
	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			/* this interrupt completed a d0i3 exit */
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			/* this interrupt completed a d0i3 enter */
			hw->pg_state = MEI_PG_ON;
		}

		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
  898. /**
  899. * mei_me_pg_intr - perform pg processing in interrupt thread handler
  900. *
  901. * @dev: the device structure
  902. * @intr_source: interrupt source
  903. */
  904. static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
  905. {
  906. struct mei_me_hw *hw = to_me_hw(dev);
  907. if (hw->d0i3_supported)
  908. mei_me_d0i3_intr(dev, intr_source);
  909. else
  910. mei_me_pg_legacy_intr(dev);
  911. }
  912. /**
  913. * mei_me_pg_enter_sync - perform runtime pm entry procedure
  914. *
  915. * @dev: the device structure
  916. *
 * Return: 0 on success, an error code otherwise
  918. */
  919. int mei_me_pg_enter_sync(struct mei_device *dev)
  920. {
  921. struct mei_me_hw *hw = to_me_hw(dev);
  922. if (hw->d0i3_supported)
  923. return mei_me_d0i3_enter_sync(dev);
  924. else
  925. return mei_me_pg_legacy_enter_sync(dev);
  926. }
  927. /**
  928. * mei_me_pg_exit_sync - perform runtime pm exit procedure
  929. *
  930. * @dev: the device structure
  931. *
 * Return: 0 on success, an error code otherwise
  933. */
  934. int mei_me_pg_exit_sync(struct mei_device *dev)
  935. {
  936. struct mei_me_hw *hw = to_me_hw(dev);
  937. if (hw->d0i3_supported)
  938. return mei_me_d0i3_exit_sync(dev);
  939. else
  940. return mei_me_pg_legacy_exit_sync(dev);
  941. }
  942. /**
  943. * mei_me_hw_reset - resets fw via mei csr register.
  944. *
  945. * @dev: the device structure
  946. * @intr_enable: if interrupt should be enabled after reset.
  947. *
 * Return: 0 on success, an error code otherwise
  949. */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		/* must leave d0i3 before the reset can take effect */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset, interrupt generate, and clear status bits */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		/* interrupt-less reset: release the FW and, if supported,
		 * drop back into d0i3 low power state
		 */
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
  1000. /**
  1001. * mei_me_irq_quick_handler - The ISR of the MEI device
  1002. *
  1003. * @irq: The irq number
  1004. * @dev_id: pointer to the device structure
  1005. *
  1006. * Return: irqreturn_t
  1007. */
  1008. irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
  1009. {
  1010. struct mei_device *dev = (struct mei_device *)dev_id;
  1011. u32 hcsr;
  1012. hcsr = mei_hcsr_read(dev);
  1013. if (!me_intr_src(hcsr))
  1014. return IRQ_NONE;
  1015. dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
  1016. /* disable interrupts on device */
  1017. me_intr_disable(dev, hcsr);
  1018. return IRQ_WAKE_THREAD;
  1019. }
  1020. /**
  1021. * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
  1022. * processing.
  1023. *
  1024. * @irq: The irq number
  1025. * @dev_id: pointer to the device structure
  1026. *
  1027. * Return: irqreturn_t
  1028. *
  1029. */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct list_head cmpl_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* acknowledge the interrupt sources latched in H_CSR */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	INIT_LIST_HEAD(&cmpl_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	if (mei_me_hw_is_resetting(dev))
		mei_hcsr_set_hig(dev);

	/* run the power-gating part of the interrupt handling */
	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		/* any other read error (outside an ongoing reset)
		 * is fatal and triggers a reset
		 */
		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the reply to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &cmpl_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &cmpl_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-arm interrupts that the quick handler disabled */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
/* ME hardware ops table wired into the generic MEI core */
static const struct mei_hw_ops mei_me_hw_ops = {

	/* firmware status and power-gating state */
	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	/* readiness queries */
	.host_is_ready = mei_me_host_is_ready,
	.hw_is_ready = mei_me_hw_is_ready,

	/* lifecycle */
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	/* power gating */
	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt control */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	/* host (write) circular buffer */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_hbuf_write,

	/* read side */
	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
  1121. static bool mei_me_fw_type_nm(struct pci_dev *pdev)
  1122. {
  1123. u32 reg;
  1124. pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
  1125. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
  1126. /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
  1127. return (reg & 0x600) == 0x200;
  1128. }
/* quirk: probe for Node Manager firmware (device is then rejected) */
#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm
  1131. static bool mei_me_fw_type_sps(struct pci_dev *pdev)
  1132. {
  1133. u32 reg;
  1134. unsigned int devfn;
  1135. /*
  1136. * Read ME FW Status register to check for SPS Firmware
  1137. * The SPS FW is only signaled in pci function 0
  1138. */
  1139. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1140. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
  1141. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  1142. /* if bits [19:16] = 15, running SPS Firmware */
  1143. return (reg & 0xf0000) == 0xf0000;
  1144. }
/* quirk: probe for SPS firmware (device is then rejected) */
#define MEI_CFG_FW_SPS                          \
	.quirk_probe = mei_me_fw_type_sps

/* no PCI-config firmware status registers */
#define MEI_CFG_ICH_HFS                         \
	.fw_status.count = 0

/* one firmware status register (HFS_1) */
#define MEI_CFG_ICH10_HFS                       \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* two firmware status registers (HFS_1, HFS_2) */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* six firmware status registers (HFS_1 .. HFS_6) */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6
  1164. /* ICH Legacy devices */
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
	MEI_CFG_ICH10_HFS,
};

/* PCH devices */
static const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};
/*
 * mei_cfg_list - A list of platform specific configurations.
 * Note: has to be synchronized with enum mei_cfg_idx.
 */
/* indexed by enum mei_cfg_idx; order must match that enum */
static const struct mei_cfg *const mei_cfg_list[] = {
	[MEI_ME_UNDEF_CFG] = NULL,
	[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
	[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
	[MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
	[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
	[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
	[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
};
  1203. const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
  1204. {
  1205. BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
  1206. if (idx >= MEI_ME_NUM_CFG)
  1207. return NULL;
  1208. return mei_cfg_list[idx];
  1209. };
  1210. /**
  1211. * mei_me_dev_init - allocates and initializes the mei device structure
  1212. *
  1213. * @pdev: The pci device structure
  1214. * @cfg: per device generation config
  1215. *
  1216. * Return: The mei_device pointer on success, NULL on failure.
  1217. */
  1218. struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
  1219. const struct mei_cfg *cfg)
  1220. {
  1221. struct mei_device *dev;
  1222. struct mei_me_hw *hw;
  1223. dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
  1224. sizeof(struct mei_me_hw), GFP_KERNEL);
  1225. if (!dev)
  1226. return NULL;
  1227. hw = to_me_hw(dev);
  1228. mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
  1229. hw->cfg = cfg;
  1230. return dev;
  1231. }