/* pci_endpoint_test.c */
  1. /**
  2. * Host side test driver to test endpoint functionality
  3. *
  4. * Copyright (C) 2017 Texas Instruments
  5. * Author: Kishon Vijay Abraham I <kishon@ti.com>
  6. *
  7. * This program is free software: you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 of
  9. * the License as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/crc32.h>
  20. #include <linux/delay.h>
  21. #include <linux/fs.h>
  22. #include <linux/io.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/irq.h>
  25. #include <linux/miscdevice.h>
  26. #include <linux/module.h>
  27. #include <linux/mutex.h>
  28. #include <linux/random.h>
  29. #include <linux/slab.h>
  30. #include <linux/pci.h>
  31. #include <linux/pci_ids.h>
  32. #include <linux/pci_regs.h>
  33. #include <uapi/linux/pcitest.h>
/* Name used for the misc device, PCI resources and /proc/interrupts. */
#define DRV_MODULE_NAME "pci-endpoint-test"

/* Interrupt delivery modes negotiated with the endpoint. */
#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2

/* Register offsets inside the test-register BAR. */
#define PCI_ENDPOINT_TEST_MAGIC 0x0

/* Command register: host writes one COMMAND_* bit, endpoint executes it. */
#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)

/* Status register: endpoint reports the outcome of the last command. */
#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)

/* 64-bit DMA source/destination addresses, split into two 32-bit halves. */
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20

/* Which interrupt the endpoint should raise when a command completes. */
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28

/* TI device IDs handled specially below. */
#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_K2G 0xb00b

#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define is_j721e_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_J721E)

/* K2G inbound-window programming registers, accessed through BAR0. */
#define K2G_IB_START_L0(n) (0x304 + (0x10 * (n)))
#define K2G_IB_START_HI(n) (0x308 + (0x10 * (n)))

#define is_k2g_pci_dev(pdev) ((pdev)->device == PCI_DEVICE_ID_TI_K2G)

/* Allocates unique ".%d" suffixes for the misc device name. */
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
miscdev)

/* Module parameters: force legacy IRQs, or pick the default IRQ mode. */
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
/* Zero-based BAR indices. */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

/* Per-device state for one bound endpoint test device. */
struct pci_endpoint_test {
	struct pci_dev *pdev;		/* underlying PCI function */
	void __iomem *base;		/* mapping of the test-register BAR */
	void __iomem *bar[6];		/* mappings of all memory BARs (or NULL) */
	struct completion irq_raised;	/* signalled by the IRQ handler */
	int last_irq;			/* Linux IRQ number that last fired */
	int num_irqs;			/* vectors currently requested */
	int irq_type;			/* IRQ_TYPE_* currently configured */
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;	/* BAR holding the test registers */
	size_t alignment;		/* required DMA buffer alignment */
	const char *name;		/* kstrdup'd device name (for IRQs) */
};

/* Per-SoC match data attached to the PCI device ID table entries. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};
  112. static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
  113. u32 offset)
  114. {
  115. return readl(test->base + offset);
  116. }
  117. static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
  118. u32 offset, u32 value)
  119. {
  120. writel(value, test->base + offset);
  121. }
  122. static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
  123. int bar, int offset)
  124. {
  125. return readl(test->bar[bar] + offset);
  126. }
  127. static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
  128. int bar, u32 offset, u32 value)
  129. {
  130. writel(value, test->bar[bar] + offset);
  131. }
/*
 * Shared interrupt handler for all test vectors.
 *
 * If the endpoint set STATUS_IRQ_RAISED, record which Linux IRQ fired and
 * wake any waiter, then acknowledge by writing the status back with the
 * IRQ_RAISED bit cleared.  Always returns IRQ_HANDLED because the vectors
 * are requested with IRQF_SHARED.
 */
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	/* Write back the (possibly unchanged) status to ack the interrupt. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}
  146. static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
  147. {
  148. struct pci_dev *pdev = test->pdev;
  149. pci_free_irq_vectors(pdev);
  150. test->irq_type = IRQ_TYPE_UNDEFINED;
  151. }
  152. static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
  153. int type)
  154. {
  155. int irq = -1;
  156. struct pci_dev *pdev = test->pdev;
  157. struct device *dev = &pdev->dev;
  158. bool res = true;
  159. switch (type) {
  160. case IRQ_TYPE_LEGACY:
  161. irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
  162. if (irq < 0)
  163. dev_err(dev, "Failed to get Legacy interrupt\n");
  164. break;
  165. case IRQ_TYPE_MSI:
  166. irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
  167. if (irq < 0)
  168. dev_err(dev, "Failed to get MSI interrupts\n");
  169. break;
  170. case IRQ_TYPE_MSIX:
  171. irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
  172. if (irq < 0)
  173. dev_err(dev, "Failed to get MSI-X interrupts\n");
  174. break;
  175. default:
  176. dev_err(dev, "Invalid IRQ type selected\n");
  177. }
  178. if (irq < 0) {
  179. irq = 0;
  180. res = false;
  181. }
  182. test->irq_type = type;
  183. test->num_irqs = irq;
  184. return res;
  185. }
  186. static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
  187. {
  188. int i;
  189. struct pci_dev *pdev = test->pdev;
  190. struct device *dev = &pdev->dev;
  191. for (i = 0; i < test->num_irqs; i++)
  192. devm_free_irq(dev, pci_irq_vector(pdev, i), test);
  193. test->num_irqs = 0;
  194. }
  195. static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
  196. {
  197. int i;
  198. int err;
  199. struct pci_dev *pdev = test->pdev;
  200. struct device *dev = &pdev->dev;
  201. for (i = 0; i < test->num_irqs; i++) {
  202. err = devm_request_irq(dev, pci_irq_vector(pdev, i),
  203. pci_endpoint_test_irqhandler,
  204. IRQF_SHARED, test->name, test);
  205. if (err)
  206. goto fail;
  207. }
  208. return true;
  209. fail:
  210. switch (irq_type) {
  211. case IRQ_TYPE_LEGACY:
  212. dev_err(dev, "Failed to request IRQ %d for Legacy\n",
  213. pci_irq_vector(pdev, i));
  214. break;
  215. case IRQ_TYPE_MSI:
  216. dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
  217. pci_irq_vector(pdev, i),
  218. i + 1);
  219. break;
  220. case IRQ_TYPE_MSIX:
  221. dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
  222. pci_irq_vector(pdev, i),
  223. i + 1);
  224. break;
  225. }
  226. return false;
  227. }
/*
 * Verify that @barno is readable and writable by filling it with a test
 * pattern and reading it back.
 *
 * This is destructive to the BAR's contents.  For the BAR that holds the
 * test registers, only the first 4 bytes (the MAGIC register) are touched
 * so the command/status registers are not clobbered.
 */
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}
  249. static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
  250. {
  251. u32 val;
  252. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  253. IRQ_TYPE_LEGACY);
  254. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
  255. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  256. COMMAND_RAISE_LEGACY_IRQ);
  257. val = wait_for_completion_timeout(&test->irq_raised,
  258. msecs_to_jiffies(1000));
  259. if (!val)
  260. return false;
  261. return true;
  262. }
  263. static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
  264. u16 msi_num, bool msix)
  265. {
  266. u32 val;
  267. struct pci_dev *pdev = test->pdev;
  268. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  269. msix == false ? IRQ_TYPE_MSI :
  270. IRQ_TYPE_MSIX);
  271. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
  272. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  273. msix == false ? COMMAND_RAISE_MSI_IRQ :
  274. COMMAND_RAISE_MSIX_IRQ);
  275. val = wait_for_completion_timeout(&test->irq_raised,
  276. msecs_to_jiffies(1000));
  277. if (!val)
  278. return false;
  279. if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
  280. return true;
  281. return false;
  282. }
/*
 * Exercise the endpoint's COPY command: DMA-map a random source buffer and
 * an empty destination buffer, have the endpoint copy @size bytes between
 * them, and compare CRC32s of both buffers.
 *
 * Buffers are over-allocated by test->alignment so the mapped addresses can
 * be rounded up to the alignment the endpoint's DMA requires.
 *
 * Unwind note: the error labels fall through each other, so each label
 * releases exactly the resources acquired before its goto site —
 * err_dst_phys_addr frees the dst buffer, err_dst_addr unmaps the src
 * mapping, err_src_phys_addr frees the src buffer.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;

	/* Guard the size + alignment additions below against overflow. */
	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/* Round the mapped address up to the required DMA alignment. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* Endpoint raises an interrupt when the copy finishes. */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
/*
 * Exercise the endpoint's READ command (a host "write" test): fill a buffer
 * with random bytes, publish its CRC32 and DMA address, and ask the
 * endpoint to read @size bytes from host memory.  The endpoint reports
 * STATUS_READ_SUCCESS if the data it read matches the checksum.
 *
 * The buffer is over-allocated by test->alignment so the mapped address can
 * be rounded up to the endpoint's required DMA alignment.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;

	/* Guard the size + alignment additions below against overflow. */
	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Round the mapped address up to the required DMA alignment. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	/* Endpoint raises an interrupt when it is done reading. */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
/*
 * Exercise the endpoint's WRITE command (a host "read" test): give the
 * endpoint a DMA destination buffer and ask it to write @size bytes of its
 * own data there, then compare our CRC32 of the buffer with the checksum
 * the endpoint advertises in PCI_ENDPOINT_TEST_CHECKSUM.
 *
 * The buffer is over-allocated by test->alignment so the mapped address can
 * be rounded up to the endpoint's required DMA alignment.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;

	/* Guard the size + alignment additions below against overflow. */
	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Round the mapped address up to the required DMA alignment. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	/* Endpoint raises an interrupt when it is done writing. */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
  506. static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
  507. {
  508. pci_endpoint_test_release_irq(test);
  509. pci_endpoint_test_free_irq_vectors(test);
  510. return true;
  511. }
  512. static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
  513. int req_irq_type)
  514. {
  515. struct pci_dev *pdev = test->pdev;
  516. struct device *dev = &pdev->dev;
  517. if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
  518. dev_err(dev, "Invalid IRQ type option\n");
  519. return false;
  520. }
  521. if (test->irq_type == req_irq_type)
  522. return true;
  523. pci_endpoint_test_release_irq(test);
  524. pci_endpoint_test_free_irq_vectors(test);
  525. if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
  526. goto err;
  527. if (!pci_endpoint_test_request_irq(test))
  528. goto err;
  529. return true;
  530. err:
  531. pci_endpoint_test_free_irq_vectors(test);
  532. return false;
  533. }
  534. static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
  535. unsigned long arg)
  536. {
  537. int ret = -EINVAL;
  538. enum pci_barno bar;
  539. struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
  540. struct pci_dev *pdev = test->pdev;
  541. mutex_lock(&test->mutex);
  542. switch (cmd) {
  543. case PCITEST_BAR:
  544. bar = arg;
  545. if (bar < 0 || bar > 5)
  546. goto ret;
  547. if ((is_am654_pci_dev(pdev) || is_k2g_pci_dev(pdev)) &&
  548. bar == BAR_0)
  549. goto ret;
  550. ret = pci_endpoint_test_bar(test, bar);
  551. break;
  552. case PCITEST_LEGACY_IRQ:
  553. ret = pci_endpoint_test_legacy_irq(test);
  554. break;
  555. case PCITEST_MSI:
  556. case PCITEST_MSIX:
  557. ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
  558. break;
  559. case PCITEST_WRITE:
  560. ret = pci_endpoint_test_write(test, arg);
  561. break;
  562. case PCITEST_READ:
  563. ret = pci_endpoint_test_read(test, arg);
  564. break;
  565. case PCITEST_COPY:
  566. ret = pci_endpoint_test_copy(test, arg);
  567. break;
  568. case PCITEST_SET_IRQTYPE:
  569. ret = pci_endpoint_test_set_irq(test, arg);
  570. break;
  571. case PCITEST_GET_IRQTYPE:
  572. ret = irq_type;
  573. break;
  574. case PCITEST_CLEAR_IRQ:
  575. ret = pci_endpoint_test_clear_irq(test);
  576. break;
  577. }
  578. ret:
  579. mutex_unlock(&test->mutex);
  580. return ret;
  581. }
/* Only ioctl is supported; open/release use the miscdevice defaults. */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
  586. static int pci_endpoint_test_k2g_init(struct pci_endpoint_test *test)
  587. {
  588. struct pci_dev *pdev = test->pdev;
  589. enum pci_barno bar;
  590. resource_size_t start;
  591. if (!test->bar[0])
  592. return -EINVAL;
  593. for (bar = BAR_1; bar <= BAR_5; bar++) {
  594. start = pci_resource_start(pdev, bar);
  595. pci_endpoint_test_bar_writel(test, BAR_0,
  596. K2G_IB_START_L0(bar - 1),
  597. lower_32_bits(start));
  598. pci_endpoint_test_bar_writel(test, BAR_0,
  599. K2G_IB_START_HI(bar - 1),
  600. upper_32_bits(start));
  601. }
  602. return 0;
  603. }
  604. static int pci_endpoint_test_probe(struct pci_dev *pdev,
  605. const struct pci_device_id *ent)
  606. {
  607. int err;
  608. int id;
  609. char name[24];
  610. enum pci_barno bar;
  611. void __iomem *base;
  612. struct device *dev = &pdev->dev;
  613. struct pci_endpoint_test *test;
  614. struct pci_endpoint_test_data *data;
  615. enum pci_barno test_reg_bar = BAR_0;
  616. struct miscdevice *misc_device;
  617. if (pci_is_bridge(pdev))
  618. return -ENODEV;
  619. test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
  620. if (!test)
  621. return -ENOMEM;
  622. test->test_reg_bar = 0;
  623. test->alignment = 0;
  624. test->pdev = pdev;
  625. if (no_msi)
  626. irq_type = IRQ_TYPE_LEGACY;
  627. data = (struct pci_endpoint_test_data *)ent->driver_data;
  628. if (data) {
  629. test_reg_bar = data->test_reg_bar;
  630. test->test_reg_bar = test_reg_bar;
  631. test->alignment = data->alignment;
  632. irq_type = data->irq_type;
  633. }
  634. init_completion(&test->irq_raised);
  635. mutex_init(&test->mutex);
  636. if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
  637. dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
  638. dev_err(dev, "Cannot set DMA mask\n");
  639. return -EINVAL;
  640. }
  641. err = pci_enable_device(pdev);
  642. if (err) {
  643. dev_err(dev, "Cannot enable PCI device\n");
  644. return err;
  645. }
  646. err = pci_request_regions(pdev, DRV_MODULE_NAME);
  647. if (err) {
  648. dev_err(dev, "Cannot obtain PCI resources\n");
  649. goto err_disable_pdev;
  650. }
  651. pci_set_master(pdev);
  652. pci_intx(pdev, true);
  653. if (!(is_am654_pci_dev(pdev) || is_j721e_pci_dev(pdev))) {
  654. if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
  655. goto err_disable_irq;
  656. if (!pci_endpoint_test_request_irq(test))
  657. goto err_disable_irq;
  658. }
  659. for (bar = BAR_0; bar <= BAR_5; bar++) {
  660. if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
  661. base = pci_ioremap_bar(pdev, bar);
  662. if (!base) {
  663. dev_err(dev, "Failed to read BAR%d\n", bar);
  664. WARN_ON(bar == test_reg_bar);
  665. }
  666. test->bar[bar] = base;
  667. }
  668. }
  669. test->base = test->bar[test_reg_bar];
  670. if (!test->base) {
  671. err = -ENOMEM;
  672. dev_err(dev, "Cannot perform PCI test without BAR%d\n",
  673. test_reg_bar);
  674. goto err_iounmap;
  675. }
  676. if (is_k2g_pci_dev(pdev)) {
  677. err = pci_endpoint_test_k2g_init(test);
  678. if (err)
  679. goto err_iounmap;
  680. }
  681. pci_set_drvdata(pdev, test);
  682. id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
  683. if (id < 0) {
  684. err = id;
  685. dev_err(dev, "Unable to get id\n");
  686. goto err_iounmap;
  687. }
  688. snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
  689. misc_device = &test->miscdev;
  690. misc_device->minor = MISC_DYNAMIC_MINOR;
  691. misc_device->name = kstrdup(name, GFP_KERNEL);
  692. if (!misc_device->name) {
  693. err = -ENOMEM;
  694. goto err_ida_remove;
  695. }
  696. misc_device->fops = &pci_endpoint_test_fops,
  697. err = misc_register(misc_device);
  698. if (err) {
  699. dev_err(dev, "Failed to register device\n");
  700. goto err_kfree_name;
  701. }
  702. test->name = kstrdup(name, GFP_KERNEL);
  703. return 0;
  704. err_kfree_name:
  705. kfree(misc_device->name);
  706. err_ida_remove:
  707. ida_simple_remove(&pci_endpoint_test_ida, id);
  708. err_iounmap:
  709. for (bar = BAR_0; bar <= BAR_5; bar++) {
  710. if (test->bar[bar])
  711. pci_iounmap(pdev, test->bar[bar]);
  712. }
  713. pci_endpoint_test_release_irq(test);
  714. err_disable_irq:
  715. pci_endpoint_test_free_irq_vectors(test);
  716. pci_release_regions(pdev);
  717. err_disable_pdev:
  718. pci_disable_device(pdev);
  719. return err;
  720. }
/*
 * Unbind: recover the IDA id from the misc device name, then tear down in
 * reverse order of probe — deregister, free names, release the id, unmap
 * BARs, release IRQs/vectors, and disable the device.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	/* The id is encoded in the device name chosen at probe time. */
	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/* Per-SoC configuration: which BAR holds the test registers and the DMA
 * buffer alignment each platform's bridge requires.
 */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};

static const struct pci_endpoint_test_data k2g_data = {
	.test_reg_bar = BAR_1,
	.alignment = SZ_1M,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

/* Devices this driver binds to; driver_data points at the config above. */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_K2G),
	  .driver_data = (kernel_ulong_t)&k2g_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
/* PCI driver registration and module boilerplate. */
static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");