pci_endpoint_test.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923
  1. /**
  2. * Host side test driver to test endpoint functionality
  3. *
  4. * Copyright (C) 2017 Texas Instruments
  5. * Author: Kishon Vijay Abraham I <kishon@ti.com>
  6. *
  7. * This program is free software: you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 of
  9. * the License as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/crc32.h>
  20. #include <linux/delay.h>
  21. #include <linux/fs.h>
  22. #include <linux/io.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/irq.h>
  25. #include <linux/miscdevice.h>
  26. #include <linux/module.h>
  27. #include <linux/mutex.h>
  28. #include <linux/random.h>
  29. #include <linux/slab.h>
  30. #include <linux/pci.h>
  31. #include <linux/pci_ids.h>
  32. #include <linux/pci_regs.h>
  33. #include <uapi/linux/pcitest.h>
#define DRV_MODULE_NAME			"pci-endpoint-test"

/* Interrupt modes; also the values accepted by the irq_type module param */
#define IRQ_TYPE_UNDEFINED		-1
#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Register map of the endpoint test device (offsets within the test BAR) */
#define PCI_ENDPOINT_TEST_MAGIC		0x0

#define PCI_ENDPOINT_TEST_COMMAND	0x4
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define PCI_ENDPOINT_TEST_STATUS	0x8
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20
#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_K2G			0xb00b

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

/* K2G inbound address translation registers, programmed through BAR0 */
#define K2G_IB_START_L0(n)		(0x304 + (0x10 * (n)))
#define K2G_IB_START_HI(n)		(0x308 + (0x10 * (n)))

#define is_k2g_pci_dev(pdev)		((pdev)->device == PCI_DEVICE_ID_TI_K2G)

/* Allocates the ".%d" suffix of each pci-endpoint-test.N misc device */
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

/*
 * Current interrupt mode.  NOTE(review): this is a module-wide global that
 * probe() and the PCITEST_SET_IRQTYPE ioctl both overwrite, so it is shared
 * by every bound device - confirm whether per-device state was intended.
 */
static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

/* Per-device state */
struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;		/* mapping of the test register BAR */
	void __iomem	*bar[6];	/* mappings of all memory BARs */
	struct completion irq_raised;	/* signalled by the IRQ handler */
	int		last_irq;	/* Linux IRQ number that last fired */
	int		num_irqs;	/* vectors allocated for this device */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno	test_reg_bar;	/* which BAR holds the test registers */
	size_t		alignment;	/* required DMA-buffer alignment */
};

/* Per-device-ID configuration, attached via pci_device_id.driver_data */
struct pci_endpoint_test_data {
	enum pci_barno	test_reg_bar;
	size_t		alignment;
	int		irq_type;
};
/* MMIO accessors for the test register BAR (test->base) */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

/* MMIO accessors for an arbitrary mapped BAR (test->bar[bar]) */
static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}
/*
 * Shared handler for every requested vector.  When the endpoint has set
 * STATUS_IRQ_RAISED, record which Linux IRQ fired (so the MSI/MSI-X tests
 * can verify the right vector was used), wake the waiter, and acknowledge
 * by writing the status back with the bit cleared.
 */
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}

	/* Write-back clears only the IRQ bit; other status bits are kept */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}
  141. static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
  142. {
  143. struct pci_dev *pdev = test->pdev;
  144. pci_free_irq_vectors(pdev);
  145. }
  146. static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
  147. int type)
  148. {
  149. int irq = -1;
  150. struct pci_dev *pdev = test->pdev;
  151. struct device *dev = &pdev->dev;
  152. bool res = true;
  153. switch (type) {
  154. case IRQ_TYPE_LEGACY:
  155. irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
  156. if (irq < 0)
  157. dev_err(dev, "Failed to get Legacy interrupt\n");
  158. break;
  159. case IRQ_TYPE_MSI:
  160. irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
  161. if (irq < 0)
  162. dev_err(dev, "Failed to get MSI interrupts\n");
  163. break;
  164. case IRQ_TYPE_MSIX:
  165. irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
  166. if (irq < 0)
  167. dev_err(dev, "Failed to get MSI-X interrupts\n");
  168. break;
  169. default:
  170. dev_err(dev, "Invalid IRQ type selected\n");
  171. }
  172. if (irq < 0) {
  173. irq = 0;
  174. res = false;
  175. }
  176. test->num_irqs = irq;
  177. return res;
  178. }
/* Free every requested IRQ handler and reset the vector count */
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}
  188. static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
  189. {
  190. int i;
  191. int err;
  192. struct pci_dev *pdev = test->pdev;
  193. struct device *dev = &pdev->dev;
  194. for (i = 0; i < test->num_irqs; i++) {
  195. err = devm_request_irq(dev, pci_irq_vector(pdev, i),
  196. pci_endpoint_test_irqhandler,
  197. IRQF_SHARED, DRV_MODULE_NAME, test);
  198. if (err)
  199. goto fail;
  200. }
  201. return true;
  202. fail:
  203. switch (irq_type) {
  204. case IRQ_TYPE_LEGACY:
  205. dev_err(dev, "Failed to request IRQ %d for Legacy\n",
  206. pci_irq_vector(pdev, i));
  207. break;
  208. case IRQ_TYPE_MSI:
  209. dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
  210. pci_irq_vector(pdev, i),
  211. i + 1);
  212. break;
  213. case IRQ_TYPE_MSIX:
  214. dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
  215. pci_irq_vector(pdev, i),
  216. i + 1);
  217. break;
  218. }
  219. return false;
  220. }
  221. static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
  222. enum pci_barno barno)
  223. {
  224. int j;
  225. u32 val;
  226. int size;
  227. struct pci_dev *pdev = test->pdev;
  228. if (!test->bar[barno])
  229. return false;
  230. size = pci_resource_len(pdev, barno);
  231. if (barno == test->test_reg_bar)
  232. size = 0x4;
  233. for (j = 0; j < size; j += 4)
  234. pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
  235. for (j = 0; j < size; j += 4) {
  236. val = pci_endpoint_test_bar_readl(test, barno, j);
  237. if (val != 0xA0A0A0A0)
  238. return false;
  239. }
  240. return true;
  241. }
  242. static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
  243. {
  244. u32 val;
  245. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  246. IRQ_TYPE_LEGACY);
  247. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
  248. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  249. COMMAND_RAISE_LEGACY_IRQ);
  250. val = wait_for_completion_timeout(&test->irq_raised,
  251. msecs_to_jiffies(1000));
  252. if (!val)
  253. return false;
  254. return true;
  255. }
  256. static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
  257. u16 msi_num, bool msix)
  258. {
  259. u32 val;
  260. struct pci_dev *pdev = test->pdev;
  261. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  262. msix == false ? IRQ_TYPE_MSI :
  263. IRQ_TYPE_MSIX);
  264. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
  265. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  266. msix == false ? COMMAND_RAISE_MSI_IRQ :
  267. COMMAND_RAISE_MSIX_IRQ);
  268. val = wait_for_completion_timeout(&test->irq_raised,
  269. msecs_to_jiffies(1000));
  270. if (!val)
  271. return false;
  272. if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
  273. return true;
  274. return false;
  275. }
/*
 * pci_endpoint_test_copy - have the endpoint DMA-copy @size bytes between
 * two host buffers and verify the result by comparing CRC32 checksums.
 *
 * Both buffers are over-allocated by test->alignment so that an aligned
 * window of @size bytes always fits.  Returns true when the destination
 * checksum matches the source checksum.
 *
 * NOTE(review): the cleanup labels are misleadingly named - "err_dst_addr"
 * actually unmaps the *source* mapping and "err_src_phys_addr" frees the
 * source buffer - but the unwind order itself is correct.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 src_crc32;
	u32 dst_crc32;

	/* size + alignment is computed below; reject sizes that would wrap */
	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/* Pick an aligned window inside the over-allocated source buffer */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	/* Same aligned-window trick for the destination */
	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* NOTE(review): unbounded wait - a dead endpoint hangs this ioctl */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
/*
 * pci_endpoint_test_write - host -> endpoint data path test.
 *
 * Fills a DMA buffer with random data, programs its bus address and the
 * CRC32 of the payload into the endpoint, then issues COMMAND_READ so the
 * endpoint fetches the buffer and checks the CRC on its side.  Returns
 * true when the endpoint reports STATUS_READ_SUCCESS.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	/* size + alignment is computed below; reject sizes that would wrap */
	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Use an aligned window inside the over-allocated buffer */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* COMMAND_READ: the *endpoint* reads what the host wrote */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	/* NOTE(review): unbounded wait - a dead endpoint hangs this ioctl */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
/*
 * pci_endpoint_test_read - endpoint -> host data path test.
 *
 * Points the endpoint at an empty DMA buffer and issues COMMAND_WRITE so
 * the endpoint writes @size bytes into host memory, then compares the
 * buffer's CRC32 against the checksum the endpoint left in the CHECKSUM
 * register.  Returns true when they match.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	/* size + alignment is computed below; reject sizes that would wrap */
	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Use an aligned window inside the over-allocated buffer */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* COMMAND_WRITE: the *endpoint* writes into the host buffer */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	/* NOTE(review): unbounded wait - a dead endpoint hangs this ioctl */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
  496. static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
  497. int req_irq_type)
  498. {
  499. struct pci_dev *pdev = test->pdev;
  500. struct device *dev = &pdev->dev;
  501. if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
  502. dev_err(dev, "Invalid IRQ type option\n");
  503. return false;
  504. }
  505. if (irq_type == req_irq_type)
  506. return true;
  507. pci_endpoint_test_release_irq(test);
  508. pci_endpoint_test_free_irq_vectors(test);
  509. if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
  510. goto err;
  511. if (!pci_endpoint_test_request_irq(test))
  512. goto err;
  513. irq_type = req_irq_type;
  514. return true;
  515. err:
  516. pci_endpoint_test_free_irq_vectors(test);
  517. irq_type = IRQ_TYPE_UNDEFINED;
  518. return false;
  519. }
/*
 * pci_endpoint_test_ioctl - dispatch PCITEST_* commands to the test helpers.
 *
 * All commands run under test->mutex.  Most helpers return bool, so user
 * space sees 1 on success and 0 on failure; -EINVAL is returned only for
 * unknown commands or an out-of-range BAR.
 */
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		/*
		 * NOTE(review): "bar < 0" may be always-false when the enum
		 * is unsigned; the "> 5" check still bounds the index.
		 */
		if (bar < 0 || bar > 5)
			goto ret;
		/*
		 * BAR0 is skipped on AM654/K2G; on K2G it carries the
		 * inbound-translation registers programmed by
		 * pci_endpoint_test_k2g_init().
		 */
		if ((is_am654_pci_dev(pdev) || is_k2g_pci_dev(pdev)) &&
		    bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}
/* Character-device interface: all functionality is exposed via ioctl() */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
  569. static int pci_endpoint_test_k2g_init(struct pci_endpoint_test *test)
  570. {
  571. struct pci_dev *pdev = test->pdev;
  572. enum pci_barno bar;
  573. resource_size_t start;
  574. if (!test->bar[0])
  575. return -EINVAL;
  576. for (bar = BAR_1; bar <= BAR_5; bar++) {
  577. start = pci_resource_start(pdev, bar);
  578. pci_endpoint_test_bar_writel(test, BAR_0,
  579. K2G_IB_START_L0(bar - 1),
  580. lower_32_bits(start));
  581. pci_endpoint_test_bar_writel(test, BAR_0,
  582. K2G_IB_START_HI(bar - 1),
  583. upper_32_bits(start));
  584. }
  585. return 0;
  586. }
  587. static int pci_endpoint_test_probe(struct pci_dev *pdev,
  588. const struct pci_device_id *ent)
  589. {
  590. int err;
  591. int id;
  592. char name[20];
  593. enum pci_barno bar;
  594. void __iomem *base;
  595. struct device *dev = &pdev->dev;
  596. struct pci_endpoint_test *test;
  597. struct pci_endpoint_test_data *data;
  598. enum pci_barno test_reg_bar = BAR_0;
  599. struct miscdevice *misc_device;
  600. if (pci_is_bridge(pdev))
  601. return -ENODEV;
  602. test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
  603. if (!test)
  604. return -ENOMEM;
  605. test->test_reg_bar = 0;
  606. test->alignment = 0;
  607. test->pdev = pdev;
  608. if (no_msi)
  609. irq_type = IRQ_TYPE_LEGACY;
  610. data = (struct pci_endpoint_test_data *)ent->driver_data;
  611. if (data) {
  612. test_reg_bar = data->test_reg_bar;
  613. test->test_reg_bar = test_reg_bar;
  614. test->alignment = data->alignment;
  615. irq_type = data->irq_type;
  616. }
  617. init_completion(&test->irq_raised);
  618. mutex_init(&test->mutex);
  619. if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
  620. dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
  621. dev_err(dev, "Cannot set DMA mask\n");
  622. return -EINVAL;
  623. }
  624. err = pci_enable_device(pdev);
  625. if (err) {
  626. dev_err(dev, "Cannot enable PCI device\n");
  627. return err;
  628. }
  629. err = pci_request_regions(pdev, DRV_MODULE_NAME);
  630. if (err) {
  631. dev_err(dev, "Cannot obtain PCI resources\n");
  632. goto err_disable_pdev;
  633. }
  634. pci_set_master(pdev);
  635. if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
  636. goto err_disable_irq;
  637. if (!pci_endpoint_test_request_irq(test))
  638. goto err_disable_irq;
  639. for (bar = BAR_0; bar <= BAR_5; bar++) {
  640. if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
  641. base = pci_ioremap_bar(pdev, bar);
  642. if (!base) {
  643. dev_err(dev, "Failed to read BAR%d\n", bar);
  644. WARN_ON(bar == test_reg_bar);
  645. }
  646. test->bar[bar] = base;
  647. }
  648. }
  649. test->base = test->bar[test_reg_bar];
  650. if (!test->base) {
  651. err = -ENOMEM;
  652. dev_err(dev, "Cannot perform PCI test without BAR%d\n",
  653. test_reg_bar);
  654. goto err_iounmap;
  655. }
  656. if (is_k2g_pci_dev(pdev)) {
  657. err = pci_endpoint_test_k2g_init(test);
  658. if (err)
  659. goto err_iounmap;
  660. }
  661. pci_set_drvdata(pdev, test);
  662. id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
  663. if (id < 0) {
  664. err = id;
  665. dev_err(dev, "Unable to get id\n");
  666. goto err_iounmap;
  667. }
  668. snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
  669. misc_device = &test->miscdev;
  670. misc_device->minor = MISC_DYNAMIC_MINOR;
  671. misc_device->name = kstrdup(name, GFP_KERNEL);
  672. if (!misc_device->name) {
  673. err = -ENOMEM;
  674. goto err_ida_remove;
  675. }
  676. misc_device->fops = &pci_endpoint_test_fops,
  677. err = misc_register(misc_device);
  678. if (err) {
  679. dev_err(dev, "Failed to register device\n");
  680. goto err_kfree_name;
  681. }
  682. return 0;
  683. err_kfree_name:
  684. kfree(misc_device->name);
  685. err_ida_remove:
  686. ida_simple_remove(&pci_endpoint_test_ida, id);
  687. err_iounmap:
  688. for (bar = BAR_0; bar <= BAR_5; bar++) {
  689. if (test->bar[bar])
  690. pci_iounmap(pdev, test->bar[bar]);
  691. }
  692. pci_endpoint_test_release_irq(test);
  693. err_disable_irq:
  694. pci_endpoint_test_free_irq_vectors(test);
  695. pci_release_regions(pdev);
  696. err_disable_pdev:
  697. pci_disable_device(pdev);
  698. return err;
  699. }
/*
 * pci_endpoint_test_remove - undo probe: unregister the misc device,
 * return the instance id to the IDA, unmap BARs and release IRQ and PCI
 * resources.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	/* Recover the instance id embedded in "pci-endpoint-test.%d" */
	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/*
 * Per-device configuration.  NOTE(review): none of these initializers set
 * .irq_type, so it defaults to 0 (IRQ_TYPE_LEGACY) and probe() will force
 * Legacy interrupts for every matched TI device - confirm this is intended.
 */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};

static const struct pci_endpoint_test_data k2g_data = {
	.test_reg_bar = BAR_1,
	.alignment = SZ_1M,
};

/* Supported devices; the Synopsys EDDA entry carries no driver_data */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_K2G),
	  .driver_data = (kernel_ulong_t)&k2g_data
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
  750. MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
  751. static struct pci_driver pci_endpoint_test_driver = {
  752. .name = DRV_MODULE_NAME,
  753. .id_table = pci_endpoint_test_tbl,
  754. .probe = pci_endpoint_test_probe,
  755. .remove = pci_endpoint_test_remove,
  756. };
  757. module_pci_driver(pci_endpoint_test_driver);
  758. MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
  759. MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
  760. MODULE_LICENSE("GPL v2");