iort.c

/*
 * Copyright (C) 2016, Semihalf
 *      Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)     "ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)    (1 << (type))
#define IORT_MSI_TYPE           (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE         ((1 << ACPI_IORT_NODE_SMMU) | \
                                 (1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
        struct list_head        list;
        struct fwnode_handle    *fw_node;
        u32                     translation_id;
};

struct iort_fwnode {
        struct list_head list;
        struct acpi_iort_node *iort_node;
        struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *                     iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
                                  struct fwnode_handle *fwnode)
{
        struct iort_fwnode *np;

        np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
        if (WARN_ON(!np))
                return -ENOMEM;

        INIT_LIST_HEAD(&np->list);
        np->iort_node = iort_node;
        np->fwnode = fwnode;

        spin_lock(&iort_fwnode_lock);
        list_add_tail(&np->list, &iort_fwnode_list);
        spin_unlock(&iort_fwnode_lock);

        return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
        struct iort_fwnode *curr;
        struct fwnode_handle *fwnode = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        fwnode = curr->fwnode;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
        struct iort_fwnode *curr, *tmp;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        list_del(&curr->list);
                        kfree(curr);
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);
}

typedef acpi_status (*iort_find_node_callback)
        (struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 *                                to the list from where we can get it back
 *                                later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
        struct iort_its_msi_chip *its_msi_chip;

        its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
        if (!its_msi_chip)
                return -ENOMEM;

        its_msi_chip->fw_node = fw_node;
        its_msi_chip->translation_id = trans_id;

        spin_lock(&iort_msi_chip_lock);
        list_add(&its_msi_chip->list, &iort_msi_chip_list);
        spin_unlock(&iort_msi_chip_lock);

        return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
        struct iort_its_msi_chip *its_msi_chip, *t;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        list_del(&its_msi_chip->list);
                        kfree(its_msi_chip);
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
        struct fwnode_handle *fw_node = NULL;
        struct iort_its_msi_chip *its_msi_chip;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        fw_node = its_msi_chip->fw_node;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return fw_node;
}

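/*
 * Iterate over the nodes of the IORT table and return the first node of
 * the requested @type for which @callback reports an ACPI success status.
 */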
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
                                             iort_find_node_callback callback,
                                             void *context)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        int i;

        if (!iort_table)
                return NULL;

        /* Get the first IORT node */
        iort = (struct acpi_table_iort *)iort_table;
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                               "IORT node pointer overflows, bad table!\n"))
                        return NULL;

                if (iort_node->type == type &&
                    ACPI_SUCCESS(callback(iort_node, context)))
                        return iort_node;

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }

        return NULL;
}

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
        return AE_OK;
}

bool iort_node_match(u8 type)
{
        struct acpi_iort_node *node;

        node = iort_scan_node(type, iort_match_type_callback, NULL);

        return node != NULL;
}

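/*
 * iort_scan_node() callback: match an IORT node against the device passed
 * in @context, by ACPI full pathname for named components or by PCI
 * segment number for root complexes.
 */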
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
                                            void *context)
{
        struct device *dev = context;
        acpi_status status;

        if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
                struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
                struct acpi_iort_named_component *ncomp;

                if (!adev) {
                        status = AE_NOT_FOUND;
                        goto out;
                }

                status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
                if (ACPI_FAILURE(status)) {
                        dev_warn(dev, "Can't get device full path name\n");
                        goto out;
                }

                ncomp = (struct acpi_iort_named_component *)node->node_data;
                status = !strcmp(ncomp->device_name, buf.pointer) ?
                                                        AE_OK : AE_NOT_FOUND;
                acpi_os_free(buf.pointer);
        } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_root_complex *pci_rc;
                struct pci_bus *bus;

                bus = to_pci_bus(dev);
                pci_rc = (struct acpi_iort_root_complex *)node->node_data;

                /*
                 * It is assumed that PCI segment numbers map one-to-one
                 * with root complexes. Each segment number can represent
                 * only one root complex.
                 */
                status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
                                                        AE_OK : AE_NOT_FOUND;
        } else {
                status = AE_NOT_FOUND;
        }
out:
        return status;
}

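/*
 * Translate one requester ID through a single IORT ID mapping entry,
 * storing the translated value in @rid_out on success.
 */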
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                       u32 *rid_out)
{
        /* A single mapping does not care about the input ID */
        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                        *rid_out = map->output_base;
                        return 0;
                }

                pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
                        map, type);
                return -ENXIO;
        }

        if (rid_in < map->input_base ||
            (rid_in >= map->input_base + map->id_count))
                return -ENXIO;

        *rid_out = map->output_base + (rid_in - map->input_base);
        return 0;
}

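/*
 * Look up the ID mapping at @index for @node; for single mappings on named
 * components and root complexes, return the parent node (provided its type
 * matches @type_mask) and store the mapping's output ID in @id_out.
 */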
static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
                                        u32 *id_out, u8 type_mask,
                                        int index)
{
        struct acpi_iort_node *parent;
        struct acpi_iort_id_mapping *map;

        if (!node->mapping_offset || !node->mapping_count ||
            index >= node->mapping_count)
                return NULL;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset);

        /* Firmware bug! */
        if (!map->output_reference) {
                pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                       node, node->type);
                return NULL;
        }

        parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                              map->output_reference);

        if (!(IORT_TYPE_MASK(parent->type) & type_mask))
                return NULL;

        if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                        *id_out = map[index].output_base;
                        return parent;
                }
        }

        return NULL;
}

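/*
 * Walk the ID mapping tree starting at @node, translating the requester ID
 * at each level, until a node whose type matches @type_mask is reached. On
 * failure the input RID is propagated unchanged through @rid_out.
 */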
static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
                                                u32 rid_in, u32 *rid_out,
                                                u8 type_mask)
{
        u32 rid = rid_in;

        /* Parse the ID mapping tree to find specified node type */
        while (node) {
                struct acpi_iort_id_mapping *map;
                int i;

                if (IORT_TYPE_MASK(node->type) & type_mask) {
                        if (rid_out)
                                *rid_out = rid;
                        return node;
                }

                if (!node->mapping_offset || !node->mapping_count)
                        goto fail_map;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                                   node->mapping_offset);

                /* Firmware bug! */
                if (!map->output_reference) {
                        pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                               node, node->type);
                        goto fail_map;
                }

                /* Do the RID translation */
                for (i = 0; i < node->mapping_count; i++, map++) {
                        if (!iort_id_map(map, node->type, rid, &rid))
                                break;
                }

                if (i == node->mapping_count)
                        goto fail_map;

                node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                    map->output_reference);
        }

fail_map:
        /* Map input RID to output RID unchanged on mapping failure */
        if (rid_out)
                *rid_out = rid_in;

        return NULL;
}

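/*
 * Retrieve the IORT node describing @dev: the named component node for
 * platform devices, or the root complex node of the device's PCI root bus.
 */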
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
        struct pci_bus *pbus;

        if (!dev_is_pci(dev))
                return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);

        /* Find a PCI root bus */
        pbus = to_pci_dev(dev)->bus;
        while (!pci_is_root_bus(pbus))
                pbus = pbus->parent;

        return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                              iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map a MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
        struct acpi_iort_node *node;
        u32 dev_id;

        node = iort_find_dev_node(dev);
        if (!node)
                return req_id;

        iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
        return dev_id;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: The device requester ID.
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
                                unsigned int idx, int *its_id)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *node;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENXIO;

        node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
        if (!node)
                return -ENXIO;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)node->node_data;
        if (idx >= its->its_count) {
                dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
                        idx, its->its_count);
                return -ENXIO;
        }

        *its_id = its->identifiers[idx];
        return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
        struct fwnode_handle *handle;
        int its_id;

        if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
                return NULL;

        handle = iort_find_domain_token(its_id);
        if (!handle)
                return NULL;

        return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

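/*
 * pci_for_each_dma_alias() callback: record each alias RID so the caller
 * ends up with the requester ID visible upstream of all DMA aliases.
 */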
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
        u32 *rid = data;

        *rid = alias;
        return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
                               struct fwnode_handle *fwnode,
                               const struct iommu_ops *ops)
{
        int ret = iommu_fwspec_init(dev, fwnode, ops);

        if (!ret)
                ret = iommu_fwspec_add_ids(dev, &streamid, 1);

        return ret;
}

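/*
 * Resolve the IOMMU instance registered for @node and bind @dev to it with
 * the given stream ID through the IOMMU fwspec interface.
 */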
static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
                                                struct acpi_iort_node *node,
                                                u32 streamid)
{
        const struct iommu_ops *ops = NULL;
        int ret = -ENODEV;
        struct fwnode_handle *iort_fwnode;

        if (node) {
                iort_fwnode = iort_get_fwnode(node);
                if (!iort_fwnode)
                        return NULL;

                ops = iommu_get_instance(iort_fwnode);
                if (!ops)
                        return NULL;

                ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
        }

        return ret ? NULL : ops;
}

/**
 * iort_set_dma_mask - Set-up dma mask for a device.
 *
 * @dev: device to configure
 */
void iort_set_dma_mask(struct device *dev)
{
        /*
         * Set default coherent_dma_mask to 32 bit. Drivers are expected to
         * setup the correct supported mask.
         */
        if (!dev->coherent_dma_mask)
                dev->coherent_dma_mask = DMA_BIT_MASK(32);

        /*
         * Set it to coherent_dma_mask by default if the architecture
         * code has not set it.
         */
        if (!dev->dma_mask)
                dev->dma_mask = &dev->coherent_dma_mask;
}

/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
        struct acpi_iort_node *node, *parent;
        const struct iommu_ops *ops = NULL;
        u32 streamid = 0;

        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
                u32 rid;

                pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
                                       &rid);

                node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                                      iort_match_node_callback, &bus->dev);
                if (!node)
                        return NULL;

                parent = iort_node_map_rid(node, rid, &streamid,
                                           IORT_IOMMU_TYPE);

                ops = iort_iommu_xlate(dev, parent, streamid);
        } else {
                int i = 0;

                node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
                if (!node)
                        return NULL;

                parent = iort_node_get_id(node, &streamid,
                                          IORT_IOMMU_TYPE, i++);

                while (parent) {
                        ops = iort_iommu_xlate(dev, parent, streamid);
                        parent = iort_node_get_id(node, &streamid,
                                                  IORT_IOMMU_TYPE, i++);
                }
        }

        return ops;
}

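/*
 * Register a GSI with the core IRQ layer and describe it as an IRQ
 * resource for the SMMU platform device being created.
 */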
static void __init acpi_iort_register_irq(int hwirq, const char *name,
                                          int trigger,
                                          struct resource *res)
{
        int irq = acpi_register_gsi(NULL, hwirq, trigger,
                                    ACPI_ACTIVE_HIGH);

        if (irq <= 0) {
                pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
                       name);
                return;
        }

        res->start = irq;
        res->end = irq;
        res->flags = IORESOURCE_IRQ;
        res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        /* Always present mem resource */
        int num_res = 1;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        if (smmu->event_gsiv)
                num_res++;

        if (smmu->pri_gsiv)
                num_res++;

        if (smmu->gerr_gsiv)
                num_res++;

        if (smmu->sync_gsiv)
                num_res++;

        return num_res;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
                                              struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        int num_res = 0;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address + SZ_128K - 1;
        res[num_res].flags = IORESOURCE_MEM;
        num_res++;

        if (smmu->event_gsiv)
                acpi_iort_register_irq(smmu->event_gsiv, "eventq",
                                       ACPI_EDGE_SENSITIVE,
                                       &res[num_res++]);

        if (smmu->pri_gsiv)
                acpi_iort_register_irq(smmu->pri_gsiv, "priq",
                                       ACPI_EDGE_SENSITIVE,
                                       &res[num_res++]);

        if (smmu->gerr_gsiv)
                acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
                                       ACPI_EDGE_SENSITIVE,
                                       &res[num_res++]);

        if (smmu->sync_gsiv)
                acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
                                       ACPI_EDGE_SENSITIVE,
                                       &res[num_res++]);
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        /*
         * Only consider the global fault interrupt and ignore the
         * configuration access interrupt.
         *
         * MMIO address and global fault interrupt resources are always
         * present so add them to the context interrupt count as a static
         * value.
         */
        return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
                                           struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;
        int i, hw_irq, trigger, num_res = 0;
        u64 *ctx_irq, *glb_irq;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address + smmu->span - 1;
        res[num_res].flags = IORESOURCE_MEM;
        num_res++;

        glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
        /* Global IRQs */
        hw_irq = IORT_IRQ_MASK(glb_irq[0]);
        trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

        acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
                               &res[num_res++]);

        /* Context IRQs */
        ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
        for (i = 0; i < smmu->context_interrupt_count; i++) {
                hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
                trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

                acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
                                       &res[num_res++]);
        }
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
        const char *name;
        int (*iommu_init)(struct acpi_iort_node *node);
        bool (*iommu_is_coherent)(struct acpi_iort_node *node);
        int (*iommu_count_resources)(struct acpi_iort_node *node);
        void (*iommu_init_resources)(struct resource *res,
                                     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
        .name = "arm-smmu-v3",
        .iommu_is_coherent = arm_smmu_v3_is_coherent,
        .iommu_count_resources = arm_smmu_v3_count_resources,
        .iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
        .name = "arm-smmu",
        .iommu_is_coherent = arm_smmu_is_coherent,
        .iommu_count_resources = arm_smmu_count_resources,
        .iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return &iort_arm_smmu_v3_cfg;
        case ACPI_IORT_NODE_SMMU:
                return &iort_arm_smmu_cfg;
        default:
                return NULL;
        }
}

/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
        struct fwnode_handle *fwnode;
        struct platform_device *pdev;
        struct resource *r;
        enum dev_dma_attr attr;
        int ret, count;
        const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

        if (!ops)
                return -ENODEV;

        pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
        if (!pdev)
                return -ENOMEM;

        count = ops->iommu_count_resources(node);

        r = kcalloc(count, sizeof(*r), GFP_KERNEL);
        if (!r) {
                ret = -ENOMEM;
                goto dev_put;
        }

        ops->iommu_init_resources(r, node);

        ret = platform_device_add_resources(pdev, r, count);
        /*
         * Resources are duplicated in platform_device_add_resources,
         * free their allocated memory
         */
        kfree(r);

        if (ret)
                goto dev_put;

        /*
         * Add a copy of IORT node pointer to platform_data to
         * be used to retrieve IORT data information.
         */
        ret = platform_device_add_data(pdev, &node, sizeof(node));
        if (ret)
                goto dev_put;

        /*
         * We expect the dma masks to be equivalent for
         * all SMMU set-ups
         */
        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

        fwnode = iort_get_fwnode(node);
        if (!fwnode) {
                ret = -ENODEV;
                goto dev_put;
        }

        pdev->dev.fwnode = fwnode;

        attr = ops->iommu_is_coherent(node) ?
                        DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

        /* Configure DMA for the page table walker */
        acpi_dma_configure(&pdev->dev, attr);

        ret = platform_device_add(pdev);
        if (ret)
                goto dma_deconfigure;

        return 0;

dma_deconfigure:
        acpi_dma_deconfigure(&pdev->dev);
dev_put:
        platform_device_put(pdev);

        return ret;
}

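/*
 * Scan the IORT table for SMMU nodes, allocate a static fwnode for each
 * one found and register the corresponding platform device.
 */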
static void __init iort_init_platform_devices(void)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;

        /*
         * iort_table and iort both point to the start of IORT table, but
         * have different struct types
         */
        iort = (struct acpi_table_iort *)iort_table;

        /* Get the first IORT node */
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (iort_node >= iort_end) {
                        pr_err("iort node pointer overflows, bad table\n");
                        return;
                }

                if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
                    (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

                        fwnode = acpi_alloc_fwnode_static();
                        if (!fwnode)
                                return;

                        iort_set_fwnode(iort_node, fwnode);

                        ret = iort_add_smmu_platform_device(iort_node);
                        if (ret) {
                                iort_delete_fwnode(iort_node);
                                acpi_free_fwnode_static(fwnode);
                                return;
                        }
                }

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }
}

void __init acpi_iort_init(void)
{
        acpi_status status;

        status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        const char *msg = acpi_format_exception(status);

                        pr_err("Failed to get table, %s\n", msg);
                }

                return;
        }

        iort_init_platform_devices();

        acpi_probe_device_table(iort);
}