  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/pci.h>
  17. #include <linux/kthread.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/pm_runtime.h>
  20. #include "mei_dev.h"
  21. #include "hbm.h"
  22. #include "hw-me.h"
  23. #include "hw-me-regs.h"
  24. #include "mei-trace.h"
  25. /**
  26. * mei_me_reg_read - Reads 32bit data from the mei device
  27. *
  28. * @hw: the me hardware structure
  29. * @offset: offset from which to read the data
  30. *
  31. * Return: register value (u32)
  32. */
  33. static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
  34. unsigned long offset)
  35. {
  36. return ioread32(hw->mem_addr + offset);
  37. }
/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: byte offset of the register to write
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				    unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}
  50. /**
  51. * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
  52. * read window register
  53. *
  54. * @dev: the device structure
  55. *
  56. * Return: ME_CB_RW register value (u32)
  57. */
  58. static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
  59. {
  60. return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
  61. }
  62. /**
  63. * mei_me_hcbww_write - write 32bit data to the host circular buffer
  64. *
  65. * @dev: the device structure
  66. * @data: 32bit data to be written to the host circular buffer
  67. */
  68. static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
  69. {
  70. mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
  71. }
  72. /**
  73. * mei_me_mecsr_read - Reads 32bit data from the ME CSR
  74. *
  75. * @dev: the device structure
  76. *
  77. * Return: ME_CSR_HA register value (u32)
  78. */
  79. static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
  80. {
  81. u32 reg;
  82. reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
  83. trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
  84. return reg;
  85. }
  86. /**
  87. * mei_hcsr_read - Reads 32bit data from the host CSR
  88. *
  89. * @dev: the device structure
  90. *
  91. * Return: H_CSR register value (u32)
  92. */
  93. static inline u32 mei_hcsr_read(const struct mei_device *dev)
  94. {
  95. u32 reg;
  96. reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
  97. trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
  98. return reg;
  99. }
/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 *
 * Writes the raw value as-is; callers that must not accidentally clear
 * the write-one-to-zero H_IS status bits use mei_hcsr_set() instead.
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
	trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
	mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
  111. /**
  112. * mei_hcsr_set - writes H_CSR register to the mei device,
  113. * and ignores the H_IS bit for it is write-one-to-zero.
  114. *
  115. * @dev: the device structure
  116. * @reg: new register value
  117. */
  118. static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
  119. {
  120. reg &= ~H_CSR_IS_MASK;
  121. mei_hcsr_write(dev, reg);
  122. }
  123. /**
  124. * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
  125. *
  126. * @dev: the device structure
  127. *
  128. * Return: H_D0I3C register value (u32)
  129. */
  130. static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
  131. {
  132. u32 reg;
  133. reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
  134. trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
  135. return reg;
  136. }
/**
 * mei_me_d0i3c_write - writes H_D0I3C register to device
 *
 * @dev: the device structure
 * @reg: new H_D0I3C register value
 */
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
	/* trace first so the log shows the intended value even if it hangs */
	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
  148. /**
  149. * mei_me_fw_status - read fw status register from pci config space
  150. *
  151. * @dev: mei device
  152. * @fw_status: fw status register values
  153. *
  154. * Return: 0 on success, error otherwise
  155. */
  156. static int mei_me_fw_status(struct mei_device *dev,
  157. struct mei_fw_status *fw_status)
  158. {
  159. struct pci_dev *pdev = to_pci_dev(dev->dev);
  160. struct mei_me_hw *hw = to_me_hw(dev);
  161. const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
  162. int ret;
  163. int i;
  164. if (!fw_status)
  165. return -EINVAL;
  166. fw_status->count = fw_src->count;
  167. for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
  168. ret = pci_read_config_dword(pdev, fw_src->status[i],
  169. &fw_status->status[i]);
  170. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
  171. fw_src->status[i],
  172. fw_status->status[i]);
  173. if (ret)
  174. return ret;
  175. }
  176. return 0;
  177. }
  178. /**
  179. * mei_me_hw_config - configure hw dependent settings
  180. *
  181. * @dev: mei device
  182. */
  183. static void mei_me_hw_config(struct mei_device *dev)
  184. {
  185. struct pci_dev *pdev = to_pci_dev(dev->dev);
  186. struct mei_me_hw *hw = to_me_hw(dev);
  187. u32 hcsr, reg;
  188. /* Doesn't change in runtime */
  189. hcsr = mei_hcsr_read(dev);
  190. dev->hbuf_depth = (hcsr & H_CBD) >> 24;
  191. reg = 0;
  192. pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
  193. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  194. hw->d0i3_supported =
  195. ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
  196. hw->pg_state = MEI_PG_OFF;
  197. if (hw->d0i3_supported) {
  198. reg = mei_me_d0i3c_read(dev);
  199. if (reg & H_D0I3C_I3)
  200. hw->pg_state = MEI_PG_ON;
  201. }
  202. }
  203. /**
  204. * mei_me_pg_state - translate internal pg state
  205. * to the mei power gating state
  206. *
  207. * @dev: mei device
  208. *
  209. * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
  210. */
  211. static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
  212. {
  213. struct mei_me_hw *hw = to_me_hw(dev);
  214. return hw->pg_state;
  215. }
/**
 * me_intr_src - extract the interrupt source bits from a host csr value
 *
 * @hcsr: H_CSR register value
 *
 * Return: the pending interrupt status bits (H_CSR_IS_MASK) of @hcsr
 */
static inline u32 me_intr_src(u32 hcsr)
{
	return hcsr & H_CSR_IS_MASK;
}
  220. /**
  221. * me_intr_disable - disables mei device interrupts
  222. * using supplied hcsr register value.
  223. *
  224. * @dev: the device structure
  225. * @hcsr: supplied hcsr register value
  226. */
  227. static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
  228. {
  229. hcsr &= ~H_CSR_IE_MASK;
  230. mei_hcsr_set(dev, hcsr);
  231. }
/**
 * me_intr_clear - clear pending interrupt sources
 *  using the supplied hcsr register value
 *
 * @dev: the device structure
 * @hcsr: supplied hcsr register value
 *
 * Writing the raw hcsr value back clears the set status bits, as the
 * H_IS bits are write-one-to-zero (see mei_hcsr_set()).
 */
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
	if (me_intr_src(hcsr))
		mei_hcsr_write(dev, hcsr);
}
  243. /**
  244. * mei_me_intr_clear - clear and stop interrupts
  245. *
  246. * @dev: the device structure
  247. */
  248. static void mei_me_intr_clear(struct mei_device *dev)
  249. {
  250. u32 hcsr = mei_hcsr_read(dev);
  251. me_intr_clear(dev, hcsr);
  252. }
  253. /**
  254. * mei_me_intr_enable - enables mei device interrupts
  255. *
  256. * @dev: the device structure
  257. */
  258. static void mei_me_intr_enable(struct mei_device *dev)
  259. {
  260. u32 hcsr = mei_hcsr_read(dev);
  261. hcsr |= H_CSR_IE_MASK;
  262. mei_hcsr_set(dev, hcsr);
  263. }
  264. /**
  265. * mei_me_intr_disable - disables mei device interrupts
  266. *
  267. * @dev: the device structure
  268. */
  269. static void mei_me_intr_disable(struct mei_device *dev)
  270. {
  271. u32 hcsr = mei_hcsr_read(dev);
  272. me_intr_disable(dev, hcsr);
  273. }
  274. /**
  275. * mei_me_synchronize_irq - wait for pending IRQ handlers
  276. *
  277. * @dev: the device structure
  278. */
  279. static void mei_me_synchronize_irq(struct mei_device *dev)
  280. {
  281. struct pci_dev *pdev = to_pci_dev(dev->dev);
  282. synchronize_irq(pdev->irq);
  283. }
  284. /**
  285. * mei_me_hw_reset_release - release device from the reset
  286. *
  287. * @dev: the device structure
  288. */
  289. static void mei_me_hw_reset_release(struct mei_device *dev)
  290. {
  291. u32 hcsr = mei_hcsr_read(dev);
  292. hcsr |= H_IG;
  293. hcsr &= ~H_RST;
  294. mei_hcsr_set(dev, hcsr);
  295. /* complete this write before we set host ready on another CPU */
  296. mmiowb();
  297. }
  298. /**
  299. * mei_me_host_set_ready - enable device
  300. *
  301. * @dev: mei device
  302. */
  303. static void mei_me_host_set_ready(struct mei_device *dev)
  304. {
  305. u32 hcsr = mei_hcsr_read(dev);
  306. hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
  307. mei_hcsr_set(dev, hcsr);
  308. }
  309. /**
  310. * mei_me_host_is_ready - check whether the host has turned ready
  311. *
  312. * @dev: mei device
  313. * Return: bool
  314. */
  315. static bool mei_me_host_is_ready(struct mei_device *dev)
  316. {
  317. u32 hcsr = mei_hcsr_read(dev);
  318. return (hcsr & H_RDY) == H_RDY;
  319. }
  320. /**
  321. * mei_me_hw_is_ready - check whether the me(hw) has turned ready
  322. *
  323. * @dev: mei device
  324. * Return: bool
  325. */
  326. static bool mei_me_hw_is_ready(struct mei_device *dev)
  327. {
  328. u32 mecsr = mei_me_mecsr_read(dev);
  329. return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
  330. }
/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Sleeps on dev->wait_hw_ready with device_lock dropped; the
 * dev->recvd_hw_ready flag is presumably set by the interrupt path —
 * the waker is outside this file chunk.  On success the reset is
 * released and the flag is consumed for the next reset cycle.
 *
 * Return: 0 on success, -ETIME if the hardware did not signal ready
 * within MEI_HW_READY_TIMEOUT
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
	/* drop the lock so the interrupt handler can update state */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}
	mei_me_hw_reset_release(dev);
	/* consume the flag for the next reset cycle */
	dev->recvd_hw_ready = false;
	return 0;
}
  353. /**
  354. * mei_me_hw_start - hw start routine
  355. *
  356. * @dev: mei device
  357. * Return: 0 on success, error otherwise
  358. */
  359. static int mei_me_hw_start(struct mei_device *dev)
  360. {
  361. int ret = mei_me_hw_ready_wait(dev);
  362. if (ret)
  363. return ret;
  364. dev_dbg(dev->dev, "hw is ready\n");
  365. mei_me_host_set_ready(dev);
  366. return ret;
  367. }
  368. /**
  369. * mei_hbuf_filled_slots - gets number of device filled buffer slots
  370. *
  371. * @dev: the device structure
  372. *
  373. * Return: number of filled slots
  374. */
  375. static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
  376. {
  377. u32 hcsr;
  378. char read_ptr, write_ptr;
  379. hcsr = mei_hcsr_read(dev);
  380. read_ptr = (char) ((hcsr & H_CBRP) >> 8);
  381. write_ptr = (char) ((hcsr & H_CBWP) >> 16);
  382. return (unsigned char) (write_ptr - read_ptr);
  383. }
  384. /**
  385. * mei_me_hbuf_is_empty - checks if host buffer is empty.
  386. *
  387. * @dev: the device structure
  388. *
  389. * Return: true if empty, false - otherwise.
  390. */
  391. static bool mei_me_hbuf_is_empty(struct mei_device *dev)
  392. {
  393. return mei_hbuf_filled_slots(dev) == 0;
  394. }
  395. /**
  396. * mei_me_hbuf_empty_slots - counts write empty slots.
  397. *
  398. * @dev: the device structure
  399. *
  400. * Return: -EOVERFLOW if overflow, otherwise empty slots count
  401. */
  402. static int mei_me_hbuf_empty_slots(struct mei_device *dev)
  403. {
  404. unsigned char filled_slots, empty_slots;
  405. filled_slots = mei_hbuf_filled_slots(dev);
  406. empty_slots = dev->hbuf_depth - filled_slots;
  407. /* check for overflow */
  408. if (filled_slots > dev->hbuf_depth)
  409. return -EOVERFLOW;
  410. return empty_slots;
  411. }
  412. /**
  413. * mei_me_hbuf_max_len - returns size of hw buffer.
  414. *
  415. * @dev: the device structure
  416. *
  417. * Return: size of hw buffer in bytes
  418. */
  419. static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
  420. {
  421. return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
  422. }
  423. /**
  424. * mei_me_hbuf_write - writes a message to host hw buffer.
  425. *
  426. * @dev: the device structure
  427. * @header: mei HECI header of message
  428. * @buf: message payload will be written
  429. *
  430. * Return: -EIO if write has failed
  431. */
  432. static int mei_me_hbuf_write(struct mei_device *dev,
  433. struct mei_msg_hdr *header,
  434. const unsigned char *buf)
  435. {
  436. unsigned long rem;
  437. unsigned long length = header->length;
  438. u32 *reg_buf = (u32 *)buf;
  439. u32 hcsr;
  440. u32 dw_cnt;
  441. int i;
  442. int empty_slots;
  443. dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
  444. empty_slots = mei_hbuf_empty_slots(dev);
  445. dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
  446. dw_cnt = mei_data2slots(length);
  447. if (empty_slots < 0 || dw_cnt > empty_slots)
  448. return -EMSGSIZE;
  449. mei_me_hcbww_write(dev, *((u32 *) header));
  450. for (i = 0; i < length / 4; i++)
  451. mei_me_hcbww_write(dev, reg_buf[i]);
  452. rem = length & 0x3;
  453. if (rem > 0) {
  454. u32 reg = 0;
  455. memcpy(&reg, &buf[length - rem], rem);
  456. mei_me_hcbww_write(dev, reg);
  457. }
  458. hcsr = mei_hcsr_read(dev) | H_IG;
  459. mei_hcsr_set(dev, hcsr);
  460. if (!mei_me_hw_is_ready(dev))
  461. return -EIO;
  462. return 0;
  463. }
  464. /**
  465. * mei_me_count_full_read_slots - counts read full slots.
  466. *
  467. * @dev: the device structure
  468. *
  469. * Return: -EOVERFLOW if overflow, otherwise filled slots count
  470. */
  471. static int mei_me_count_full_read_slots(struct mei_device *dev)
  472. {
  473. u32 me_csr;
  474. char read_ptr, write_ptr;
  475. unsigned char buffer_depth, filled_slots;
  476. me_csr = mei_me_mecsr_read(dev);
  477. buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
  478. read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
  479. write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
  480. filled_slots = (unsigned char) (write_ptr - read_ptr);
  481. /* check for overflow */
  482. if (filled_slots > buffer_depth)
  483. return -EOVERFLOW;
  484. dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
  485. return (int)filled_slots;
  486. }
  487. /**
  488. * mei_me_read_slots - reads a message from mei device.
  489. *
  490. * @dev: the device structure
  491. * @buffer: message buffer will be written
  492. * @buffer_length: message size will be read
  493. *
  494. * Return: always 0
  495. */
  496. static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
  497. unsigned long buffer_length)
  498. {
  499. u32 *reg_buf = (u32 *)buffer;
  500. u32 hcsr;
  501. for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
  502. *reg_buf++ = mei_me_mecbrw_read(dev);
  503. if (buffer_length > 0) {
  504. u32 reg = mei_me_mecbrw_read(dev);
  505. memcpy(reg_buf, &reg, buffer_length);
  506. }
  507. hcsr = mei_hcsr_read(dev) | H_IG;
  508. mei_hcsr_set(dev, hcsr);
  509. return 0;
  510. }
  511. /**
  512. * mei_me_pg_set - write pg enter register
  513. *
  514. * @dev: the device structure
  515. */
  516. static void mei_me_pg_set(struct mei_device *dev)
  517. {
  518. struct mei_me_hw *hw = to_me_hw(dev);
  519. u32 reg;
  520. reg = mei_me_reg_read(hw, H_HPG_CSR);
  521. trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  522. reg |= H_HPG_CSR_PGI;
  523. trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  524. mei_me_reg_write(hw, H_HPG_CSR, reg);
  525. }
  526. /**
  527. * mei_me_pg_unset - write pg exit register
  528. *
  529. * @dev: the device structure
  530. */
  531. static void mei_me_pg_unset(struct mei_device *dev)
  532. {
  533. struct mei_me_hw *hw = to_me_hw(dev);
  534. u32 reg;
  535. reg = mei_me_reg_read(hw, H_HPG_CSR);
  536. trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  537. WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
  538. reg |= H_HPG_CSR_PGIHEXR;
  539. trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  540. mei_me_reg_write(hw, H_HPG_CSR, reg);
  541. }
/**
 * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
 *
 * @dev: the device structure
 *
 * Sends the PG isolation entry request and waits (with device_lock
 * dropped) for the firmware response; on success the pg enter register
 * is written.  Note: hw->pg_state is set to MEI_PG_ON even when the
 * wait times out.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	/* sleep outside the lock; pg_event is presumably set to
	 * RECEIVED by the HBM response path — outside this chunk
	 */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_set(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}
/**
 * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
 *
 * @dev: the device structure
 *
 * Writes the pg exit register, waits (with device_lock dropped) for the
 * firmware notification, then replies with the exit response and waits
 * again for the interrupt acknowledgment.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	/* the firmware may have already signalled the exit */
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;
	mei_me_pg_unset(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		/*
		 * NOTE(review): this early return leaves pg_event at
		 * MEI_PG_EVENT_INTR_WAIT and skips the pg_state update
		 * below — confirm this is intentional.
		 */
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}
  614. /**
  615. * mei_me_pg_in_transition - is device now in pg transition
  616. *
  617. * @dev: the device structure
  618. *
  619. * Return: true if in pg transition, false otherwise
  620. */
  621. static bool mei_me_pg_in_transition(struct mei_device *dev)
  622. {
  623. return dev->pg_event >= MEI_PG_EVENT_WAIT &&
  624. dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
  625. }
  626. /**
  627. * mei_me_pg_is_enabled - detect if PG is supported by HW
  628. *
  629. * @dev: the device structure
  630. *
  631. * Return: true is pg supported, false otherwise
  632. */
  633. static bool mei_me_pg_is_enabled(struct mei_device *dev)
  634. {
  635. struct mei_me_hw *hw = to_me_hw(dev);
  636. u32 reg = mei_me_mecsr_read(dev);
  637. if (hw->d0i3_supported)
  638. return true;
  639. if ((reg & ME_PGIC_HRA) == 0)
  640. goto notsupported;
  641. if (!dev->hbm_f_pg_supported)
  642. goto notsupported;
  643. return true;
  644. notsupported:
  645. dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
  646. hw->d0i3_supported,
  647. !!(reg & ME_PGIC_HRA),
  648. dev->version.major_version,
  649. dev->version.minor_version,
  650. HBM_MAJOR_VERSION_PGI,
  651. HBM_MINOR_VERSION_PGI);
  652. return false;
  653. }
  654. /**
  655. * mei_me_d0i3_set - write d0i3 register bit on mei device.
  656. *
  657. * @dev: the device structure
  658. * @intr: ask for interrupt
  659. *
  660. * Return: D0I3C register value
  661. */
  662. static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
  663. {
  664. u32 reg = mei_me_d0i3c_read(dev);
  665. reg |= H_D0I3C_I3;
  666. if (intr)
  667. reg |= H_D0I3C_IR;
  668. else
  669. reg &= ~H_D0I3C_IR;
  670. mei_me_d0i3c_write(dev, reg);
  671. /* read it to ensure HW consistency */
  672. reg = mei_me_d0i3c_read(dev);
  673. return reg;
  674. }
  675. /**
  676. * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
  677. *
  678. * @dev: the device structure
  679. *
  680. * Return: D0I3C register value
  681. */
  682. static u32 mei_me_d0i3_unset(struct mei_device *dev)
  683. {
  684. u32 reg = mei_me_d0i3c_read(dev);
  685. reg &= ~H_D0I3C_I3;
  686. reg |= H_D0I3C_IR;
  687. mei_me_d0i3c_write(dev, reg);
  688. /* read it to ensure HW consistency */
  689. reg = mei_me_d0i3c_read(dev);
  690. return reg;
  691. }
/**
 * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
 *
 * @dev: the device structure
 *
 * Performs the PGI handshake with the firmware and then sets the D0i3
 * bit, waiting (with device_lock dropped) for each stage to complete.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;
	u32 reg;

	reg = mei_me_d0i3c_read(dev);
	if (reg & H_D0I3C_I3) {
		/* we are in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 set not needed\n");
		ret = 0;
		goto on;
	}

	/* PGI entry procedure */
	dev->pg_event = MEI_PG_EVENT_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		/* FIXME: should we reset here? */
		goto out;

	/* sleep outside the lock until the response arrives */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}
	/* end PGI entry procedure */

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	reg = mei_me_d0i3_set(dev, true);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress - presumably completed at once */
		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
		ret = 0;
		goto on;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* even on timeout the entry succeeded if the I3 bit is set */
		reg = mei_me_d0i3c_read(dev);
		if (!(reg & H_D0I3C_I3)) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
on:
	hw->pg_state = MEI_PG_ON;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
	return ret;
}
  754. /**
  755. * mei_me_d0i3_enter - perform d0i3 entry procedure
  756. * no hbm PG handshake
  757. * no waiting for confirmation; runs with interrupts
  758. * disabled
  759. *
  760. * @dev: the device structure
  761. *
  762. * Return: 0 on success an error code otherwise
  763. */
  764. static int mei_me_d0i3_enter(struct mei_device *dev)
  765. {
  766. struct mei_me_hw *hw = to_me_hw(dev);
  767. u32 reg;
  768. reg = mei_me_d0i3c_read(dev);
  769. if (reg & H_D0I3C_I3) {
  770. /* we are in d0i3, nothing to do */
  771. dev_dbg(dev->dev, "already d0i3 : set not needed\n");
  772. goto on;
  773. }
  774. mei_me_d0i3_set(dev, false);
  775. on:
  776. hw->pg_state = MEI_PG_ON;
  777. dev->pg_event = MEI_PG_EVENT_IDLE;
  778. dev_dbg(dev->dev, "d0i3 enter\n");
  779. return 0;
  780. }
/**
 * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
 *
 * @dev: the device structure
 *
 * Clears the D0i3 bit and waits (with device_lock dropped) for the
 * hardware to acknowledge the exit via interrupt.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
	int ret;
	u32 reg;

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;

	reg = mei_me_d0i3c_read(dev);
	if (!(reg & H_D0I3C_I3)) {
		/* we are not in d0i3, nothing to do */
		dev_dbg(dev->dev, "d0i3 exit not needed\n");
		ret = 0;
		goto off;
	}

	reg = mei_me_d0i3_unset(dev);
	if (!(reg & H_D0I3C_CIP)) {
		/* no command in progress - presumably completed at once */
		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
		ret = 0;
		goto off;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
		/* even on timeout the exit succeeded if I3 is already clear */
		reg = mei_me_d0i3c_read(dev);
		if (reg & H_D0I3C_I3) {
			ret = -ETIME;
			goto out;
		}
	}

	ret = 0;
off:
	hw->pg_state = MEI_PG_OFF;
out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
	return ret;
}
/**
 * mei_me_pg_legacy_intr - perform legacy pg processing
 *  in interrupt thread handler
 *
 * @dev: the device structure
 *
 * Marks a pending legacy PG transition as interrupt-acknowledged,
 * moves pg_state to MEI_PG_OFF and wakes any waiter on dev->wait_pg.
 */
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* only relevant while a PG transition is waiting for the interrupt */
	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}
/**
 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
 *
 * @dev: the device structure
 * @intr_source: interrupt source bits extracted from H_CSR
 *
 * Completes a pending d0i3 transition (toggling hw->pg_state) and
 * wakes the waiter; also kicks off runtime resume when the hardware
 * itself initiated a D0i3 exit.
 */
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
	    (intr_source & H_D0I3C_IS)) {
		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
		if (hw->pg_state == MEI_PG_ON) {
			/* this interrupt completed a d0i3 exit */
			hw->pg_state = MEI_PG_OFF;
			if (dev->hbm_state != MEI_HBM_IDLE) {
				/*
				 * force H_RDY because it could be
				 * wiped off during PG
				 */
				dev_dbg(dev->dev, "d0i3 set host ready\n");
				mei_me_host_set_ready(dev);
			}
		} else {
			/* this interrupt completed a d0i3 entry */
			hw->pg_state = MEI_PG_ON;
		}
		wake_up(&dev->wait_pg);
	}

	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
		/*
		 * HW sent some data and we are in D0i3, so
		 * we got here because of HW initiated exit from D0i3.
		 * Start runtime pm resume sequence to exit low power state.
		 */
		dev_dbg(dev->dev, "d0i3 want resume\n");
		mei_hbm_pg_resume(dev);
	}
}
  880. /**
  881. * mei_me_pg_intr - perform pg processing in interrupt thread handler
  882. *
  883. * @dev: the device structure
  884. * @intr_source: interrupt source
  885. */
  886. static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
  887. {
  888. struct mei_me_hw *hw = to_me_hw(dev);
  889. if (hw->d0i3_supported)
  890. mei_me_d0i3_intr(dev, intr_source);
  891. else
  892. mei_me_pg_legacy_intr(dev);
  893. }
  894. /**
  895. * mei_me_pg_enter_sync - perform runtime pm entry procedure
  896. *
  897. * @dev: the device structure
  898. *
  899. * Return: 0 on success an error code otherwise
  900. */
  901. int mei_me_pg_enter_sync(struct mei_device *dev)
  902. {
  903. struct mei_me_hw *hw = to_me_hw(dev);
  904. if (hw->d0i3_supported)
  905. return mei_me_d0i3_enter_sync(dev);
  906. else
  907. return mei_me_pg_legacy_enter_sync(dev);
  908. }
  909. /**
  910. * mei_me_pg_exit_sync - perform runtime pm exit procedure
  911. *
  912. * @dev: the device structure
  913. *
  914. * Return: 0 on success an error code otherwise
  915. */
  916. int mei_me_pg_exit_sync(struct mei_device *dev)
  917. {
  918. struct mei_me_hw *hw = to_me_hw(dev);
  919. if (hw->d0i3_supported)
  920. return mei_me_d0i3_exit_sync(dev);
  921. else
  922. return mei_me_pg_legacy_exit_sync(dev);
  923. }
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		/* leave D0i3 before touching H_CSR; the exit needs interrupts */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset, ack interrupts; H_IG = interrupt generate to FW */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		/* no interrupts: complete the reset release manually and
		 * drop back into D0i3 where supported
		 */
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
  982. /**
  983. * mei_me_irq_quick_handler - The ISR of the MEI device
  984. *
  985. * @irq: The irq number
  986. * @dev_id: pointer to the device structure
  987. *
  988. * Return: irqreturn_t
  989. */
  990. irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
  991. {
  992. struct mei_device *dev = (struct mei_device *)dev_id;
  993. u32 hcsr;
  994. hcsr = mei_hcsr_read(dev);
  995. if (!me_intr_src(hcsr))
  996. return IRQ_NONE;
  997. dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
  998. /* disable interrupts on device */
  999. me_intr_disable(dev, hcsr);
  1000. return IRQ_WAKE_THREAD;
  1001. }
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	u32 hcsr;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);

	/* ack the interrupt causes latched in H_CSR */
	hcsr = mei_hcsr_read(dev);
	me_intr_clear(dev, hcsr);

	mei_io_list_init(&complete_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* power-gating transitions must be handled before any I/O */
	mei_me_pg_intr(dev, me_intr_src(hcsr));

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &complete_list);
		/* writes may have consumed the host buffer; refresh the flag */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	/* complete callbacks collected by the read/write handlers */
	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	/* re-arm the device interrupts masked by the quick handler */
	mei_me_intr_enable(dev);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
/* hardware abstraction callbacks for the PCI-based ME interface */
static const struct mei_hw_ops mei_me_hw_ops = {

	/* status and state queries */
	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	/* power gating */
	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt control */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,
	.synchronize_irq = mei_me_synchronize_irq,

	/* host (write) circular buffer */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_hbuf_write,

	/* read side */
	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
  1101. static bool mei_me_fw_type_nm(struct pci_dev *pdev)
  1102. {
  1103. u32 reg;
  1104. pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
  1105. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
  1106. /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
  1107. return (reg & 0x600) == 0x200;
  1108. }
/* config fragment: reject devices running Node Manager firmware */
#define MEI_CFG_FW_NM                           \
	.quirk_probe = mei_me_fw_type_nm
  1111. static bool mei_me_fw_type_sps(struct pci_dev *pdev)
  1112. {
  1113. u32 reg;
  1114. unsigned int devfn;
  1115. /*
  1116. * Read ME FW Status register to check for SPS Firmware
  1117. * The SPS FW is only signaled in pci function 0
  1118. */
  1119. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1120. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
  1121. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  1122. /* if bits [19:16] = 15, running SPS Firmware */
  1123. return (reg & 0xf0000) == 0xf0000;
  1124. }
/* config fragment: reject devices running SPS firmware */
#define MEI_CFG_FW_SPS                          \
	.quirk_probe = mei_me_fw_type_sps
/* legacy devices expose no firmware status registers */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* ICH generation: a single firmware status register */
#define MEI_CFG_ICH_HFS                         \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* PCH generation: two firmware status registers */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* PCH8 (Lynx Point) and newer: six firmware status registers */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6
/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};
  1170. /**
  1171. * mei_me_dev_init - allocates and initializes the mei device structure
  1172. *
  1173. * @pdev: The pci device structure
  1174. * @cfg: per device generation config
  1175. *
  1176. * Return: The mei_device_device pointer on success, NULL on failure.
  1177. */
  1178. struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
  1179. const struct mei_cfg *cfg)
  1180. {
  1181. struct mei_device *dev;
  1182. struct mei_me_hw *hw;
  1183. dev = kzalloc(sizeof(struct mei_device) +
  1184. sizeof(struct mei_me_hw), GFP_KERNEL);
  1185. if (!dev)
  1186. return NULL;
  1187. hw = to_me_hw(dev);
  1188. mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
  1189. hw->cfg = cfg;
  1190. return dev;
  1191. }