ntb_hw_amd.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name of AMD Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD PCIe NTB Linux driver
 *
 * Contact Information:
 * Xiangliang Yu <Xiangliang.Yu@amd.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_amd.h"

#define NTB_NAME	"ntb_hw_amd"
#define NTB_DESC	"AMD(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"1.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("AMD Inc.");

static const struct file_operations amd_ntb_debugfs_info;
static struct dentry *debugfs_dir;
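
/*
 * Map a memory window index to its PCI BAR number: MW 0 lives behind
 * BAR 1, MW 1 behind BAR 2 and MW 2 behind BAR 4 (1 << idx), so BAR 3
 * is skipped.
 */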
static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;

	return 1 << idx;
}
static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				resource_size_t *addr_align,
				resource_size_t *size_align,
				resource_size_t *size_max)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg = 0;
	resource_size_t mw_size;
	void __iomem *mmio, *peer_mmio;
	u64 base_addr, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ntb->pdev, bar);

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	peer_mmio = ndev->peer_mmio;

	base_addr = pci_resource_start(ntb->pdev, bar);

	if (bar != 1) {
		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		write64(limit, mmio + limit_reg);
		reg_val = read64(mmio + limit_reg);
		if (reg_val != limit) {
			write64(base_addr, mmio + limit_reg);
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	} else {
		xlat_reg = AMD_BAR1XLAT_OFFSET;
		limit_reg = AMD_BAR1LMT_OFFSET;

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		writel(limit, mmio + limit_reg);
		reg_val = readl(mmio + limit_reg);
		if (reg_val != limit) {
			writel(base_addr, mmio + limit_reg);
			writel(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
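
/*
 * A minimal sketch of how an NTB client would drive this through the
 * generic ntb.h API (illustrative only; "buf" and "dma_addr" are
 * hypothetical client-side names, not part of this driver):
 *
 *	resource_size_t align, size_max;
 *	dma_addr_t dma_addr;
 *	void *buf;
 *
 *	ntb_mw_get_align(ntb, NTB_DEF_PEER_IDX, 0, &align, NULL, &size_max);
 *	buf = dma_alloc_coherent(&ntb->pdev->dev, size_max, &dma_addr,
 *				 GFP_KERNEL);
 *	if (buf)
 *		ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, 0, dma_addr,
 *				 size_max);
 */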
static int amd_link_is_up(struct amd_ntb_dev *ndev)
{
	if (!ndev->peer_sta)
		return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);

	if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
		ndev->peer_sta = 0;
		return 1;
	}

	/* If peer_sta holds a reset or D0 event, the ISR has already
	 * started a timer to poll the hardware link status, so just clear
	 * the status bit here. If peer_sta is D3 or PME_TO, the D0/reset
	 * event will arrive when the system wakes up or powers on, so do
	 * nothing here.
	 */
	if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
		ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
	else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
		ndev->peer_sta = 0;

	return 0;
}

static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
			      enum ntb_speed *speed,
			      enum ntb_width *width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int ret = 0;

	if (amd_link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);

		dev_dbg(&ntb->pdev->dev, "link is up.\n");

		ret = 1;
	} else {
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;

		dev_dbg(&ntb->pdev->dev, "link is down.\n");
	}

	return ret;
}
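
/*
 * Only the primary side drives link control here: the PMM/SMM bits in
 * AMD_CNTL_OFFSET are toggled below, and a secondary-topology device
 * bails out with -EINVAL after (un)masking its event interrupts.
 */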
static int amd_ntb_link_enable(struct ntb_dev *ntb,
			       enum ntb_speed max_speed,
			       enum ntb_width max_width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	/* Enable event interrupt */
	ndev->int_mask &= ~AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

	return 0;
}

static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	/* Disable event interrupt */
	ndev->int_mask |= AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Disabling Link.\n");

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

	return 0;
}
static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	/* The same as for inbound MWs */
	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *base, resource_size_t *size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_count;
}

static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector >= ndev->db_count)
		return 0;

	return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector);
}

static u64 amd_ntb_db_read(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
}

static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET);

	return 0;
}

static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask |= db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask &= ~db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);

	return 0;
}

static int amd_ntb_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	offset = ndev->self_spad + (idx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_spad_write(struct ntb_dev *ntb,
			      int idx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->self_spad + (idx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
				   int sidx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static const struct ntb_dev_ops amd_ntb_ops = {
	.mw_count		= amd_ntb_mw_count,
	.mw_get_align		= amd_ntb_mw_get_align,
	.mw_set_trans		= amd_ntb_mw_set_trans,
	.peer_mw_count		= amd_ntb_peer_mw_count,
	.peer_mw_get_addr	= amd_ntb_peer_mw_get_addr,
	.link_is_up		= amd_ntb_link_is_up,
	.link_enable		= amd_ntb_link_enable,
	.link_disable		= amd_ntb_link_disable,
	.db_valid_mask		= amd_ntb_db_valid_mask,
	.db_vector_count	= amd_ntb_db_vector_count,
	.db_vector_mask		= amd_ntb_db_vector_mask,
	.db_read		= amd_ntb_db_read,
	.db_clear		= amd_ntb_db_clear,
	.db_set_mask		= amd_ntb_db_set_mask,
	.db_clear_mask		= amd_ntb_db_clear_mask,
	.peer_db_set		= amd_ntb_peer_db_set,
	.spad_count		= amd_ntb_spad_count,
	.spad_read		= amd_ntb_spad_read,
	.spad_write		= amd_ntb_spad_write,
	.peer_spad_read		= amd_ntb_peer_spad_read,
	.peer_spad_write	= amd_ntb_peer_spad_write,
};
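
/*
 * These ops are consumed through the generic NTB client API. A rough,
 * illustrative doorbell exchange between the two sides (client code,
 * not part of this driver) could look like:
 *
 *	ntb_db_clear_mask(ntb, ntb_db_valid_mask(ntb));
 *	ntb_peer_db_set(ntb, BIT(0));	ring doorbell bit 0 on the peer
 *
 * and, in the peer's doorbell callback:
 *
 *	u64 db = ntb_db_read(ntb);
 *	ntb_db_clear(ntb, db);
 */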
static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
{
	void __iomem *mmio = ndev->self_mmio;
	int reg;

	reg = readl(mmio + AMD_SMUACK_OFFSET);
	reg |= bit;
	writel(reg, mmio + AMD_SMUACK_OFFSET);

	ndev->peer_sta |= bit;
}

static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	struct device *dev = &ndev->ntb.pdev->dev;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
	case AMD_PEER_FLUSH_EVENT:
		dev_info(dev, "Flush is done.\n");
		break;
	case AMD_PEER_RESET_EVENT:
		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* polling peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
	case AMD_LINK_DOWN_EVENT:
		amd_ack_smu(ndev, status);

		/* link down */
		ntb_link_event(&ndev->ntb);

		break;
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is WAKEUP event */
		if (status & 0x1)
			dev_info(dev, "Wakeup is done.\n");

		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(dev, "event status = 0x%x.\n", status);
		break;
	}
}
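
/*
 * Vector demultiplexing: vectors below AMD_DB_CNT carry doorbells, the
 * vector(s) above carry SMU events. With a single shared vector (MSI or
 * INTx) both paths have to be checked on every interrupt.
 */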
static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
{
	dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);

	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
		amd_handle_event(ndev, vec);

	if (vec < AMD_DB_CNT)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct amd_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct amd_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
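
/*
 * Interrupt setup falls back in three stages: MSI-X with one vector per
 * doorbell, then a single MSI, then legacy INTx. In the single-vector
 * modes only one doorbell is exposed (db_count = 1).
 */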
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;
	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	void __iomem *mmio = ndev->self_mmio;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);

	if (ndev->msix) {
		i = ndev->msix_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
		else
			pci_intx(pdev, 0);
	}
}
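
/*
 * debugfs "info" dump: topology, link state, resource counts and the raw
 * doorbell/XLAT/LMT registers, capped at a 2KB (0x800 byte) buffer.
 */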
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct amd_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!amd_link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "MSIX Vector Count -\t%u\n", ndev->msix_vec_count);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);

	u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t\t%#06x\n", u.v32);

	u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t\t%#06x\n", u.v32);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT1 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT1 -\t\t\t%#06x\n", u.v32);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(pci_name(ndev->ntb.pdev),
					   debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &amd_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &amd_ntb_ops;
	ndev->int_mask = AMD_EVENT_INTMASK;
	spin_lock_init(&ndev->db_mask_lock);
}
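
/*
 * Sample the peer's SIDEINFO active bit; if it changed, refresh the
 * cached PCIe link status from config space and return 1 so the caller
 * can raise an ntb_link_event().
 */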
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->peer_mmio;
	u32 reg, stat;
	int rc;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	reg &= NTB_LIN_STA_ACTIVE_BIT;

	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);

	if (reg == ndev->cntl_sta)
		return 0;

	ndev->cntl_sta = reg;

	rc = pci_read_config_dword(ndev->ntb.pdev,
				   AMD_LINK_STATUS_OFFSET, &stat);
	if (rc)
		return 0;
	ndev->lnk_sta = stat;

	return 1;
}

static void amd_link_hb(struct work_struct *work)
{
	struct amd_ntb_dev *ndev = hb_ndev(work);

	if (amd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (!amd_link_is_up(ndev))
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}

static int amd_init_isr(struct amd_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}
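
/*
 * The AMD_SIDE_READY bit in SIDEINFO acts as a per-side presence flag:
 * it is set when the driver probes and cleared (with a read-back to
 * flush the posted write) when it is removed.
 */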
static void amd_init_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	unsigned int reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
	}
}

static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	unsigned int reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
		readl(mmio + AMD_SIDEINFO_OFFSET);
	}
}
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;

	ndev->mw_count = AMD_MW_CNT;
	ndev->spad_count = AMD_SPADS_CNT;
	ndev->db_count = AMD_DB_CNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
	case NTB_TOPO_SEC:
		/* Each side gets half of the scratchpads; the peer's half
		 * starts 0x20 bytes (eight 32-bit registers) into the
		 * region.
		 */
		ndev->spad_count >>= 1;
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			ndev->self_spad = 0;
			ndev->peer_spad = 0x20;
		} else {
			ndev->self_spad = 0x20;
			ndev->peer_spad = 0;
		}

		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	default:
		dev_err(&ndev->ntb.pdev->dev,
			"AMD NTB does not support B2B mode.\n");
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Mask event interrupts */
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}
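
/*
 * Topology is read from the SIDEINFO register: the AMD_SIDE_MASK bit
 * distinguishes the secondary side from the primary. B2B is not
 * supported, so these are the only two options.
 */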
static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 info;

	info = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (info & AMD_SIDE_MASK)
		return NTB_TOPO_SEC;
	else
		return NTB_TOPO_PRI;
}

static int amd_init_dev(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int rc = 0;

	pdev = ndev->ntb.pdev;

	ndev->ntb.topo = amd_get_topo(ndev);
	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
		ntb_topo_string(ndev->ntb.topo));

	rc = amd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = amd_init_isr(ndev);
	if (rc) {
		dev_err(&pdev->dev, "failed to init isr.\n");
		return rc;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

static void amd_deinit_dev(struct amd_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);

	ndev_deinit_isr(ndev);
}

static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
					  dma_get_mask(&pdev->dev));
	if (rc)
		goto err_dma_mask;

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
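
/*
 * Probe brings the device up in layers: PCI resources, then NTB state
 * and interrupts, then the SIDEINFO ready handshake and an initial link
 * poll, and finally debugfs plus registration with the NTB core. The
 * error path unwinds in reverse order.
 */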
static int amd_ntb_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct amd_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
	if (!ndev) {
		rc = -ENOMEM;
		goto err_ndev;
	}

	ndev_init_struct(ndev, pdev);

	rc = amd_ntb_init_pci(ndev, pdev);
	if (rc)
		goto err_init_pci;

	rc = amd_init_dev(ndev);
	if (rc)
		goto err_init_dev;

	/* write side info */
	amd_init_side_info(ndev);

	amd_poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
err_init_dev:
	amd_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_side_info(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct file_operations amd_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id amd_ntb_pci_tbl[] = {
	{PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)},
	{0}
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);

static struct pci_driver amd_ntb_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= amd_ntb_pci_tbl,
	.probe		= amd_ntb_pci_probe,
	.remove		= amd_ntb_pci_remove,
};

static int __init amd_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&amd_ntb_pci_driver);
}
module_init(amd_ntb_pci_driver_init);

static void __exit amd_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&amd_ntb_pci_driver);
	debugfs_remove_recursive(debugfs_dir);
}
module_exit(amd_ntb_pci_driver_exit);