iort.c

/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))
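
/*
 * These masks are matched against IORT_TYPE_MASK(node->type) when walking
 * ID mappings, so a single walk can terminate on either SMMU variant
 * (IORT_IOMMU_TYPE) or on an ITS group (IORT_MSI_TYPE).
 */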

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 * to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
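
/*
 * Usage sketch (the caller lives outside this file): the GIC ITS driver
 * is expected to call iort_register_domain_token() with each ITS
 * translation ID and its MSI domain token at probe time, so that
 * iort_find_domain_token() can later resolve an ITS ID found through the
 * IORT ID mappings back to an irq domain for a given device.
 */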

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
	return AE_OK;
}

bool iort_node_match(u8 type)
{
	struct acpi_iort_node *node;

	node = iort_scan_node(type, iort_match_type_callback, NULL);

	return node != NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * to root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}
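
/*
 * Worked example (illustrative values, not from any real table): a range
 * mapping with input_base = 0x0, id_count = 0xffff and output_base =
 * 0x10000 translates rid_in 0x42 to *rid_out 0x10042, i.e. output_base +
 * (rid_in - input_base); as implemented here, rid_in must satisfy
 * input_base <= rid_in < input_base + id_count. A SINGLE_MAPPING entry
 * instead ignores rid_in and always emits output_base.
 */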

static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	    index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
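
/*
 * Example walk (illustrative): for a PCI device, iort_node_map_id() may
 * start at a root complex node with the requester ID, translate it to a
 * stream ID while stepping to an SMMU node, then to a device ID while
 * stepping to an ITS group node, stopping as soon as the current node's
 * type is in type_mask.
 */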

static
struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
						 u32 *id_out, u8 type_mask,
						 int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: map the initial dev id if its parent is not
	 * the target type we want, map it again for the use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device ID was found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	for (i = 0; i < node->mapping_count; i++) {
		if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
			return 0;
	}

	return -ENODEV;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}
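
/*
 * __get_pci_rid() is used below as a pci_for_each_dma_alias() callback;
 * since it always returns 0 the iteration runs to completion and *data
 * is left holding the last alias reported, i.e. the requester ID that is
 * expected to reach the root complex.
 */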

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}
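
/*
 * Note that IS_BUILTIN() evaluates to false when the SMMU driver is built
 * as a module, so in that case iort_iommu_xlate() below aborts the IOMMU
 * configuration (returns NULL) instead of deferring the probe.
 */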

#ifdef CONFIG_IOMMU_API
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
	int err = 0;

	if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
	    !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{ return NULL; }

static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif

static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
						struct acpi_iort_node *node,
						u32 streamid)
{
	const struct iommu_ops *ops = NULL;
	int ret = -ENODEV;
	struct fwnode_handle *iort_fwnode;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (node) {
		iort_fwnode = iort_get_fwnode(node);
		if (!iort_fwnode)
			return NULL;

		ops = iommu_ops_from_fwnode(iort_fwnode);
		/*
		 * If the ops look-up fails, this means that either
		 * the SMMU drivers have not been probed yet or that
		 * the SMMU drivers are not built into the kernel;
		 * depending on whether the SMMU drivers are built-in
		 * or not, defer the IOMMU configuration or just abort it.
		 */
		if (!ops)
			return iort_iommu_driver_enabled(node->type) ?
			       ERR_PTR(-EPROBE_DEFER) : NULL;

		ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
	}

	return ret ? NULL : ops;
}

/**
 * iort_set_dma_mask - Set-up dma mask for a device.
 *
 * @dev: device to configure
 */
void iort_set_dma_mask(struct device *dev)
{
	/*
	 * Set default coherent_dma_mask to 32 bit. Drivers are expected to
	 * setup the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
}

/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops = NULL;
	u32 streamid = 0;
	int err;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		u32 rid;

		pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
				       &rid);

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		parent = iort_node_map_id(node, rid, &streamid,
					  IORT_IOMMU_TYPE);

		ops = iort_iommu_xlate(dev, parent, streamid);
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE, i++);

		while (parent) {
			ops = iort_iommu_xlate(dev, parent, streamid);
			if (IS_ERR_OR_NULL(ops))
				return ops;

			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);
		}
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	err = iort_add_device_replay(ops, dev);
	if (err)
		ops = ERR_PTR(err);

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}
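
/*
 * For example, an SMMUv3 node with all four GSIV fields populated yields
 * five resources: one MEM resource for the register region plus one IRQ
 * resource each for eventq, priq, gerror and cmdq-sync.
 */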

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + SZ_128K - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;

	if (smmu->event_gsiv)
		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->pri_gsiv)
		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->gerr_gsiv)
		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->sync_gsiv)
		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}
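
/*
 * For example, an SMMUv1/v2 node describing four context interrupts
 * counts six resources in total: the MMIO region, the global fault
 * interrupt and the four context interrupts.
 */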

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			     DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
		    (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}