dmar.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

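/*
 * A DMAR device scope entry is an acpi_dmar_device_scope header followed
 * by an array of acpi_dmar_pci_path (device, function) pairs. Starting
 * from the scope's start bus, each path step selects a slot on the
 * current bus; when a step lands on a bridge, the walk descends onto its
 * secondary bus. The final step names the device the scope refers to.
 */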
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		/* print the name before dropping our reference */
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;

	return 0;
}

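/*
 * Two-pass parse of a scope list: first count the ENDPOINT and BRIDGE
 * entries so the pci_dev array can be sized, then walk the list again
 * and resolve each entry to a pci_dev via dmar_parse_one_dev_scope().
 */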
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init
dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	static int include_all;
	int ret = 0;	/* initialized: the first INCLUDE_ALL path sets nothing else */

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (!dmaru->include_all)
		ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	else {
		/* Only allow one INCLUDE_ALL */
		if (include_all) {
			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
				"device scope is allowed\n");
			ret = -EINVAL;
		}
		include_all = 1;
	}

	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;
		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			rmrr->base_address, rmrr->end_address);
		break;
	}
}

/**
 * parse_dmar_table - parses the DMA reporting table
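 *
 * The DMAR table is a fixed acpi_table_dmar header followed by a list of
 * variable-length remapping structures; each starts with an
 * acpi_dmar_header giving its type and length, so the loop below simply
 * advances by entry_header->length until the end of the table.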
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT_4K - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

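/*
 * Returns 1 if @dev, or any bridge on the path from @dev up to the root
 * bus, appears in @devices; walking up through dev->bus->self lets a
 * device behind a listed bridge match that bridge's scope entry.
 */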
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}

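/*
 * Resolve the device scopes recorded at table-parse time into pci_dev
 * pointers. This is kept separate from dmar_table_init() because scope
 * resolution needs pci_find_bus()/pci_get_slot(), so it can only run
 * once the PCI subsystem has enumerated devices.
 */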
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	for_each_drhd_unit(drhd) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr;
		for_each_rmrr_units(rmrr) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

/**
 * early_dmar_detect - checks to see if the platform supports DMAR devices
 */
int __init early_dmar_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = early_dmar_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
}

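/*
 * Map the unit's register set and cache its capability registers. The
 * first ioremap() covers one 4K page, which is enough to read CAP/ECAP;
 * those registers encode the offsets of the IOTLB and fault-recording
 * registers, so if either lies beyond the first page the region is
 * remapped at the full size.
 */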
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;

	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = PAGE_ALIGN_4K(map_size);
	if (map_size > PAGE_SIZE_4K) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		iommu->cap, iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
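 *
 * Each submission consumes two ring slots: the caller's descriptor and a
 * trailing wait descriptor. The wait descriptor asks the hardware to
 * write a status value into qi->desc_status[wait_index] once everything
 * ahead of it has executed, so the busy-wait on that status word below
 * is what makes this call synchronous.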
 */
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	spin_lock(&iommu->register_lock);
	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock(&iommu->register_lock);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);

	return 0;
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);

	return 0;
}

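/*
 * Illustrative call only (no caller lives in this file): a
 * domain-selective IOTLB flush for domain `did`, assuming the
 * DMA_TLB_DSI_FLUSH granularity constant from intel-iommu.h, with no
 * address/size and no non-present-entry handling:
 *
 *	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 */
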
/*
 * Enable the Queued Invalidation interface. This is required to support
 * interrupt-remapping, and is also used by DMA-remapping, where it
 * replaces register-based IOTLB invalidation.
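 *
 * One zeroed 4K page backs the invalidation queue: its physical address
 * is programmed into DMAR_IQA_REG, the tail pointer is cleared, and the
 * QIE bit is set in the global command register, after which we poll the
 * status register until the hardware reports the queue as enabled.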
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	spin_lock_irqsave(&iommu->register_lock, flags);
	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	return 0;
}