/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pci-bridge.h>
#include <sysdev/fsl_pci.h>

#include "fsl_pamu_domain.h"

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain->geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}
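/*
 * Worked example (illustrative values only, assuming the subwindow is
 * mapped at its full size, i.e. win_arr[wnd].size == subwin_size):
 * with a 1 GiB geometry starting at IOVA 0 and win_cnt = 4, each
 * subwindow spans 256 MiB:
 *
 *	subwin_size = 0x40000000 >> ilog2(4)    = 0x10000000
 *	iova        = 0x23450000
 *	subwin_iova = iova & ~(subwin_size - 1) = 0x20000000
 *	wnd         = (0x20000000 - 0) >> ilog2(0x10000000) = 2
 *	phys        = win_arr[2].paddr + (iova & (subwin_size - 1))
 *	            = win_arr[2].paddr + 0x03450000
 */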
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
	int i, ret = 0;	/* initialised: no valid subwindow means nothing to map */
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAMU PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}
	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt = %d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least be equal
	 * to PAMU page size.
	 */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
		pr_debug("%s: size too small or not a power of two\n", __func__);
		return -EINVAL;
	}

	/* iova must be page size aligned */
	if (iova & (size - 1)) {
		pr_debug("%s: address is not aligned with window size\n", __func__);
		return -EINVAL;
	}

	return 0;
}
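/*
 * Example (illustrative, assuming the usual 4 KiB PAMU page size):
 * check_size(0x100000, 0x300000) succeeds because 1 MiB is a power of
 * two >= PAMU_PAGE_SIZE and 0x300000 is 1 MiB aligned, whereas
 * check_size(0x100000, 0x180000) fails with -EINVAL because 0x180000
 * is only 512 KiB aligned.
 */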
static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();
	domain->geom_size = 0;

	INIT_LIST_HEAD(&domain->devices);
	spin_lock_init(&domain->domain_lock);

	return domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check here if the device is already attached to a domain or not.
	 * If the device is already attached to a domain detach it.
	 */
	old_domain_info = dev->archdata.iommu_domain;
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		/* allocation failure: leave the device unattached */
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!dev->archdata.iommu_domain)
		dev->archdata.iommu_domain = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	domain->priv = NULL;

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return -ENOMEM;
	}
	domain->priv = dma_domain;
	dma_domain->iommu_domain = domain;
	/* default geometry 64 GB i.e. maximum system address */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = (1ULL << 36) - 1;
	domain->geometry.force_aperture = true;

	return 0;
}

/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}

	return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}

static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %s\n",
				 liodn[i], dev->of_node->full_name);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry,
					     win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
		ret = -EINVAL;
	}

	return ret;
}
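/*
 * Illustrative device-tree fragment (node names and LIODN values are
 * made up): attach resolves the "fsl,liodn" property, normally
 * programmed by u-boot, from the device node itself, or from the PCI
 * controller node for PCI devices:
 *
 *	fman0: fman@400000 {
 *		fsl,liodn = <0x40 0x41>;
 *	};
 *
 *	pci0: pcie@ffe200000 {
 *		fsl,liodn = <0x180>;
 *	};
 *
 * Two cells in "fsl,liodn" yield liodn_cnt = 2 (len / sizeof(u32)),
 * and both LIODNs are attached to the same domain.
 */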
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
}

static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = domain->priv;
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d\n",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_GEOMETRY:
		ret = configure_domain_geometry(domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
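/*
 * Usage sketch (hypothetical caller of the generic IOMMU API, e.g. a
 * vfio-style user; error handling elided, phys_base is a placeholder).
 * Ordering matters: geometry must be set before the window count, and
 * a window must be mapped before DMA is enabled for the domain.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	struct iommu_domain_geometry geom = {
 *		.aperture_start = 0,
 *		.aperture_end   = SZ_1G - 1,
 *		.force_aperture = true,
 *	};
 *	u32 wnd_cnt = 1;
 *	int enable = 1;
 *
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_GEOMETRY, &geom);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &wnd_cnt);
 *	iommu_attach_device(dom, dev);
 *	iommu_domain_window_enable(dom, 0, phys_base, SZ_1G,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */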
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		memcpy(data, &dma_domain->dma_stash,
		       sizeof(struct pamu_stash_attribute));
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		*(int *)data = dma_domain->enabled;
		break;
	case DOMAIN_ATTR_FSL_PAMUV1:
		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partioning) {
		group = iommu_group_get_for_dev(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	struct pci_dev *pdev;
	const u32 *prop;
	int ret = 0, len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		/* Don't create device groups for virtual PCI bridges */
		if (pdev->subordinate)
			return 0;

		group = get_pci_device_group(pdev);
	} else {
		prop = of_get_property(dev->of_node, "fsl,liodn", &len);
		if (prop)
			group = get_device_iommu_group(dev);
	}

	if (IS_ERR(group))
		return PTR_ERR(group);

	/*
	 * Check if device has already been added to an iommu group.
	 * Group could have already been created for a PCI device in
	 * the iommu_group_get_for_dev path.
	 */
	if (!dev->iommu_group)
		ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);

	return ret;
}

static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have a valid window count i.e. it should be less than
	 * the maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(struct dma_window),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
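/*
 * Example (illustrative): with a 1 GiB geometry, w_count = 4 splits the
 * aperture into four 256 MiB subwindows; w_count = 3 is rejected because
 * it is not a power of two, and anything above pamu_get_max_subwin_cnt()
 * (the hardware limit reported by the PAMU driver) is rejected as well.
 */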
static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	return dma_domain->win_cnt;
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable		= fsl_pamu_capable,
	.domain_init		= fsl_pamu_domain_init,
	.domain_destroy		= fsl_pamu_domain_destroy,
	.attach_dev		= fsl_pamu_attach_device,
	.detach_dev		= fsl_pamu_detach_device,
	.domain_window_enable	= fsl_pamu_window_enable,
	.domain_window_disable	= fsl_pamu_window_disable,
	.domain_get_windows	= fsl_pamu_get_windows,
	.domain_set_windows	= fsl_pamu_set_windows,
	.iova_to_phys		= fsl_pamu_iova_to_phys,
	.domain_set_attr	= fsl_pamu_set_domain_attr,
	.domain_get_attr	= fsl_pamu_get_domain_attr,
	.add_device		= fsl_pamu_add_device,
	.remove_device		= fsl_pamu_remove_device,
};

int pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}