/* dca.c */
  1. /*
  2. * Intel I/OAT DMA Linux driver
  3. * Copyright(c) 2007 - 2009 Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. * The full GNU General Public License is included in this distribution in
  19. * the file called "COPYING".
  20. *
  21. */
  22. #include <linux/kernel.h>
  23. #include <linux/pci.h>
  24. #include <linux/smp.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/dca.h>
  27. /* either a kernel change is needed, or we need something like this in kernel */
  28. #ifndef CONFIG_SMP
  29. #include <asm/smp.h>
  30. #undef cpu_physical_id
  31. #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
  32. #endif
  33. #include "dma.h"
  34. #include "registers.h"
  35. #include "dma_v2.h"
  36. /*
  37. * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
  38. * contain the bit number of the APIC ID to map into the DCA tag. If the valid
  39. * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
  40. */
  41. #define DCA_TAG_MAP_VALID 0x80
  42. #define DCA3_TAG_MAP_BIT_TO_INV 0x80
  43. #define DCA3_TAG_MAP_BIT_TO_SEL 0x40
  44. #define DCA3_TAG_MAP_LITERAL_VAL 0x1
  45. #define DCA_TAG_MAP_MASK 0xDF
  46. /* expected tag map bytes for I/OAT ver.2 */
  47. #define DCA2_TAG_MAP_BYTE0 0x80
  48. #define DCA2_TAG_MAP_BYTE1 0x0
  49. #define DCA2_TAG_MAP_BYTE2 0x81
  50. #define DCA2_TAG_MAP_BYTE3 0x82
  51. #define DCA2_TAG_MAP_BYTE4 0x82
  52. /* verify if tag map matches expected values */
  53. static inline int dca2_tag_map_valid(u8 *tag_map)
  54. {
  55. return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
  56. (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
  57. (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
  58. (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
  59. (tag_map[4] == DCA2_TAG_MAP_BYTE4));
  60. }
/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device. Software needs direct support for their tag mappings.
 */

/* Build a tag-map entry that selects bit x of the APIC ID (valid bit set). */
#define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN 8

/* Per-chipset tag maps; unspecified trailing entries are zero-filled,
 * which ioat_dca_get_tag() treats as literal 0 tag bits.
 */
static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
/* UNISYS systems use an all-zero map (every tag bit is literal 0). */
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
  74. /* pack PCI B/D/F into a u16 */
  75. static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
  76. {
  77. return (pci->bus->number << 8) | pci->devfn;
  78. }
  79. static int dca_enabled_in_bios(struct pci_dev *pdev)
  80. {
  81. /* CPUID level 9 returns DCA configuration */
  82. /* Bit 0 indicates DCA enabled by the BIOS */
  83. unsigned long cpuid_level_9;
  84. int res;
  85. cpuid_level_9 = cpuid_eax(9);
  86. res = test_bit(0, &cpuid_level_9);
  87. if (!res)
  88. dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
  89. return res;
  90. }
  91. int system_has_dca_enabled(struct pci_dev *pdev)
  92. {
  93. if (boot_cpu_has(X86_FEATURE_DCA))
  94. return dca_enabled_in_bios(pdev);
  95. dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
  96. return 0;
  97. }
  98. struct ioat_dca_slot {
  99. struct pci_dev *pdev; /* requester device */
  100. u16 rid; /* requester id, as used by IOAT */
  101. };
  102. #define IOAT_DCA_MAX_REQ 6
  103. #define IOAT3_DCA_MAX_REQ 2
  104. struct ioat_dca_priv {
  105. void __iomem *iobase;
  106. void __iomem *dca_base;
  107. int max_requesters;
  108. int requester_count;
  109. u8 tag_map[IOAT_TAG_MAP_LEN];
  110. struct ioat_dca_slot req_slots[0];
  111. };
  112. /* 5000 series chipset DCA Port Requester ID Table Entry Format
  113. * [15:8] PCI-Express Bus Number
  114. * [7:3] PCI-Express Device Number
  115. * [2:0] PCI-Express Function Number
  116. *
  117. * 5000 series chipset DCA control register format
  118. * [7:1] Reserved (0)
  119. * [0] Ignore Function Number
  120. */
  121. static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
  122. {
  123. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  124. struct pci_dev *pdev;
  125. int i;
  126. u16 id;
  127. /* This implementation only supports PCI-Express */
  128. if (!dev_is_pci(dev))
  129. return -ENODEV;
  130. pdev = to_pci_dev(dev);
  131. id = dcaid_from_pcidev(pdev);
  132. if (ioatdca->requester_count == ioatdca->max_requesters)
  133. return -ENODEV;
  134. for (i = 0; i < ioatdca->max_requesters; i++) {
  135. if (ioatdca->req_slots[i].pdev == NULL) {
  136. /* found an empty slot */
  137. ioatdca->requester_count++;
  138. ioatdca->req_slots[i].pdev = pdev;
  139. ioatdca->req_slots[i].rid = id;
  140. writew(id, ioatdca->dca_base + (i * 4));
  141. /* make sure the ignore function bit is off */
  142. writeb(0, ioatdca->dca_base + (i * 4) + 2);
  143. return i;
  144. }
  145. }
  146. /* Error, ioatdma->requester_count is out of whack */
  147. return -EFAULT;
  148. }
  149. static int ioat_dca_remove_requester(struct dca_provider *dca,
  150. struct device *dev)
  151. {
  152. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  153. struct pci_dev *pdev;
  154. int i;
  155. /* This implementation only supports PCI-Express */
  156. if (!dev_is_pci(dev))
  157. return -ENODEV;
  158. pdev = to_pci_dev(dev);
  159. for (i = 0; i < ioatdca->max_requesters; i++) {
  160. if (ioatdca->req_slots[i].pdev == pdev) {
  161. writew(0, ioatdca->dca_base + (i * 4));
  162. ioatdca->req_slots[i].pdev = NULL;
  163. ioatdca->req_slots[i].rid = 0;
  164. ioatdca->requester_count--;
  165. return i;
  166. }
  167. }
  168. return -ENODEV;
  169. }
  170. static u8 ioat_dca_get_tag(struct dca_provider *dca,
  171. struct device *dev,
  172. int cpu)
  173. {
  174. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  175. int i, apic_id, bit, value;
  176. u8 entry, tag;
  177. tag = 0;
  178. apic_id = cpu_physical_id(cpu);
  179. for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
  180. entry = ioatdca->tag_map[i];
  181. if (entry & DCA_TAG_MAP_VALID) {
  182. bit = entry & ~DCA_TAG_MAP_VALID;
  183. value = (apic_id & (1 << bit)) ? 1 : 0;
  184. } else {
  185. value = entry ? 1 : 0;
  186. }
  187. tag |= (value << i);
  188. }
  189. return tag;
  190. }
  191. static int ioat_dca_dev_managed(struct dca_provider *dca,
  192. struct device *dev)
  193. {
  194. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  195. struct pci_dev *pdev;
  196. int i;
  197. pdev = to_pci_dev(dev);
  198. for (i = 0; i < ioatdca->max_requesters; i++) {
  199. if (ioatdca->req_slots[i].pdev == pdev)
  200. return 1;
  201. }
  202. return 0;
  203. }
/* DCA provider callbacks for I/OAT v1 ("legacy") devices. */
static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
/*
 * Probe and register a DCA provider for an I/OAT v1 device.
 * @pdev:   the I/OAT PCI device
 * @iobase: its mapped MMIO base
 *
 * Returns the registered provider, or NULL if DCA is unavailable, the
 * device has no known tag map, or allocation/registration fails.
 */
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	/* v3 hardware exposes fewer requester slots than earlier parts */
	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	/* private area is sized for max_requesters trailing req_slots */
	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	/* 0x54 is the requester-id table offset on these parts
	 * (NOTE(review): magic offset, presumably from the chipset
	 * datasheet — no symbolic register name exists here)
	 */
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
  269. static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
  270. {
  271. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  272. struct pci_dev *pdev;
  273. int i;
  274. u16 id;
  275. u16 global_req_table;
  276. /* This implementation only supports PCI-Express */
  277. if (!dev_is_pci(dev))
  278. return -ENODEV;
  279. pdev = to_pci_dev(dev);
  280. id = dcaid_from_pcidev(pdev);
  281. if (ioatdca->requester_count == ioatdca->max_requesters)
  282. return -ENODEV;
  283. for (i = 0; i < ioatdca->max_requesters; i++) {
  284. if (ioatdca->req_slots[i].pdev == NULL) {
  285. /* found an empty slot */
  286. ioatdca->requester_count++;
  287. ioatdca->req_slots[i].pdev = pdev;
  288. ioatdca->req_slots[i].rid = id;
  289. global_req_table =
  290. readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
  291. writel(id | IOAT_DCA_GREQID_VALID,
  292. ioatdca->iobase + global_req_table + (i * 4));
  293. return i;
  294. }
  295. }
  296. /* Error, ioatdma->requester_count is out of whack */
  297. return -EFAULT;
  298. }
  299. static int ioat2_dca_remove_requester(struct dca_provider *dca,
  300. struct device *dev)
  301. {
  302. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  303. struct pci_dev *pdev;
  304. int i;
  305. u16 global_req_table;
  306. /* This implementation only supports PCI-Express */
  307. if (!dev_is_pci(dev))
  308. return -ENODEV;
  309. pdev = to_pci_dev(dev);
  310. for (i = 0; i < ioatdca->max_requesters; i++) {
  311. if (ioatdca->req_slots[i].pdev == pdev) {
  312. global_req_table =
  313. readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
  314. writel(0, ioatdca->iobase + global_req_table + (i * 4));
  315. ioatdca->req_slots[i].pdev = NULL;
  316. ioatdca->req_slots[i].rid = 0;
  317. ioatdca->requester_count--;
  318. return i;
  319. }
  320. }
  321. return -ENODEV;
  322. }
  323. static u8 ioat2_dca_get_tag(struct dca_provider *dca,
  324. struct device *dev,
  325. int cpu)
  326. {
  327. u8 tag;
  328. tag = ioat_dca_get_tag(dca, dev, cpu);
  329. tag = (~tag) & 0x1F;
  330. return tag;
  331. }
/* DCA provider callbacks for I/OAT v2 devices (shares dev_managed with v1). */
static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
  338. static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
  339. {
  340. int slots = 0;
  341. u32 req;
  342. u16 global_req_table;
  343. global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
  344. if (global_req_table == 0)
  345. return 0;
  346. do {
  347. req = readl(iobase + global_req_table + (slots * sizeof(u32)));
  348. slots++;
  349. } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
  350. return slots;
  351. }
/*
 * Probe and register a DCA provider for an I/OAT v2 device.
 * @pdev:   the I/OAT PCI device
 * @iobase: its mapped MMIO base
 *
 * Reads the DCA register-block offset and requester-table size from the
 * hardware, enables prefetch/memwr capabilities the BIOS may have left
 * off, copies the APIC-ID tag map out of the device, and registers the
 * provider.  Returns NULL on any failure or if the BIOS-programmed tag
 * map is invalid.
 */
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}

	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	/* five 4-bit nibbles; values < 8 select an APIC-ID bit, others
	 * become literal-0 entries
	 */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	for (i = 0; i < 5; i++) {
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
  419. static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
  420. {
  421. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  422. struct pci_dev *pdev;
  423. int i;
  424. u16 id;
  425. u16 global_req_table;
  426. /* This implementation only supports PCI-Express */
  427. if (!dev_is_pci(dev))
  428. return -ENODEV;
  429. pdev = to_pci_dev(dev);
  430. id = dcaid_from_pcidev(pdev);
  431. if (ioatdca->requester_count == ioatdca->max_requesters)
  432. return -ENODEV;
  433. for (i = 0; i < ioatdca->max_requesters; i++) {
  434. if (ioatdca->req_slots[i].pdev == NULL) {
  435. /* found an empty slot */
  436. ioatdca->requester_count++;
  437. ioatdca->req_slots[i].pdev = pdev;
  438. ioatdca->req_slots[i].rid = id;
  439. global_req_table =
  440. readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
  441. writel(id | IOAT_DCA_GREQID_VALID,
  442. ioatdca->iobase + global_req_table + (i * 4));
  443. return i;
  444. }
  445. }
  446. /* Error, ioatdma->requester_count is out of whack */
  447. return -EFAULT;
  448. }
  449. static int ioat3_dca_remove_requester(struct dca_provider *dca,
  450. struct device *dev)
  451. {
  452. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  453. struct pci_dev *pdev;
  454. int i;
  455. u16 global_req_table;
  456. /* This implementation only supports PCI-Express */
  457. if (!dev_is_pci(dev))
  458. return -ENODEV;
  459. pdev = to_pci_dev(dev);
  460. for (i = 0; i < ioatdca->max_requesters; i++) {
  461. if (ioatdca->req_slots[i].pdev == pdev) {
  462. global_req_table =
  463. readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
  464. writel(0, ioatdca->iobase + global_req_table + (i * 4));
  465. ioatdca->req_slots[i].pdev = NULL;
  466. ioatdca->req_slots[i].rid = 0;
  467. ioatdca->requester_count--;
  468. return i;
  469. }
  470. }
  471. return -ENODEV;
  472. }
  473. static u8 ioat3_dca_get_tag(struct dca_provider *dca,
  474. struct device *dev,
  475. int cpu)
  476. {
  477. u8 tag;
  478. struct ioat_dca_priv *ioatdca = dca_priv(dca);
  479. int i, apic_id, bit, value;
  480. u8 entry;
  481. tag = 0;
  482. apic_id = cpu_physical_id(cpu);
  483. for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
  484. entry = ioatdca->tag_map[i];
  485. if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
  486. bit = entry &
  487. ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
  488. value = (apic_id & (1 << bit)) ? 1 : 0;
  489. } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
  490. bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
  491. value = (apic_id & (1 << bit)) ? 0 : 1;
  492. } else {
  493. value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
  494. }
  495. tag |= (value << i);
  496. }
  497. return tag;
  498. }
/* DCA provider callbacks for I/OAT v3 devices (shares dev_managed with v1). */
static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
  505. static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
  506. {
  507. int slots = 0;
  508. u32 req;
  509. u16 global_req_table;
  510. global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
  511. if (global_req_table == 0)
  512. return 0;
  513. do {
  514. req = readl(iobase + global_req_table + (slots * sizeof(u32)));
  515. slots++;
  516. } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
  517. return slots;
  518. }
  519. static inline int dca3_tag_map_invalid(u8 *tag_map)
  520. {
  521. /*
  522. * If the tag map is not programmed by the BIOS the default is:
  523. * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
  524. *
  525. * This an invalid map and will result in only 2 possible tags
  526. * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
  527. * this entire definition is invalid.
  528. */
  529. return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
  530. (tag_map[1] == DCA_TAG_MAP_VALID) &&
  531. (tag_map[2] == DCA_TAG_MAP_VALID) &&
  532. (tag_map[3] == DCA_TAG_MAP_VALID) &&
  533. (tag_map[4] == DCA_TAG_MAP_VALID));
  534. }
/*
 * Probe and register a DCA provider for an I/OAT v3 device.
 * @pdev:   the I/OAT PCI device
 * @iobase: its mapped MMIO base
 *
 * Mirrors ioat2_dca_init() but uses the v3 register offsets and the
 * 64-bit (8-entry) APIC-ID tag map.  Returns NULL on any failure or if
 * the BIOS left the tag map unprogrammed.
 */
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	/* the 64-bit tag map is read as two 32-bit MMIO words */
	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}

	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	/* one byte per tag bit; DCA_TAG_MAP_MASK strips the undefined bit */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}