ntb_perf.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>
#include <linux/mutex.h>

#define DRIVER_NAME             "ntb_perf"
#define DRIVER_DESCRIPTION      "PCIe NTB Performance Measurement Tool"
#define DRIVER_LICENSE          "Dual BSD/GPL"
#define DRIVER_VERSION          "1.0"
#define DRIVER_AUTHOR           "Dave Jiang <dave.jiang@intel.com>"

#define PERF_LINK_DOWN_TIMEOUT  10
#define PERF_VERSION            0xffff0001
#define MAX_THREADS             32
#define MAX_TEST_SIZE           SZ_1M
#define MAX_SRCS                32
#define DMA_OUT_RESOURCE_TO     msecs_to_jiffies(50)
#define DMA_RETRIES             20
#define SZ_4G                   (1ULL << 32)
#define MAX_SEG_ORDER           20      /* no larger than 1M for kmalloc buffer */

MODULE_LICENSE(DRIVER_LICENSE);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);

static struct dentry *perf_debugfs_dir;

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");

static unsigned int run_order = 32; /* 4G */
module_param(run_order, uint, 0644);
MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer");

static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Using DMA engine to measure performance");

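/*
 * perf_mw - bookkeeping for the single NTB memory window the test uses:
 * the locally mapped outbound side of the window (phys_addr/phys_size/vbase)
 * and the DMA-coherent buffer programmed as the inbound translation that
 * the peer's writes land in (virt_addr/dma_addr/buf_size/xlat_size).
 */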
struct perf_mw {
        phys_addr_t phys_addr;
        resource_size_t phys_size;
        resource_size_t xlat_align;
        resource_size_t xlat_align_size;
        void __iomem *vbase;
        size_t xlat_size;
        size_t buf_size;
        void *virt_addr;
        dma_addr_t dma_addr;
};

struct perf_ctx;

struct pthr_ctx {
        struct task_struct *thread;
        struct perf_ctx *perf;
        atomic_t dma_sync;
        struct dma_chan *dma_chan;
        int dma_prep_err;
        int src_idx;
        void *srcs[MAX_SRCS];
        wait_queue_head_t *wq;
        int status;
        u64 copied;
        u64 diff_us;
};

struct perf_ctx {
        struct ntb_dev *ntb;
        spinlock_t db_lock;
        struct perf_mw mw;
        bool link_is_up;
        struct delayed_work link_work;
        wait_queue_head_t link_wq;
        struct dentry *debugfs_node_dir;
        struct dentry *debugfs_run;
        struct dentry *debugfs_threads;
        u8 perf_threads;
        /* mutex ensures only one set of threads run at once */
        struct mutex run_mutex;
        struct pthr_ctx pthr_ctx[MAX_THREADS];
        atomic_t tsync;
        atomic_t tdone;
};

enum {
        VERSION = 0,
        MW_SZ_HIGH,
        MW_SZ_LOW,
        MAX_SPAD
};

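/*
 * Link event callback: when the NTB link comes up, kick the delayed link
 * worker that performs the scratchpad handshake; on link down, mark the
 * link as unusable and cancel a handshake that has not completed yet.
 */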
static void perf_link_event(void *ctx)
{
        struct perf_ctx *perf = ctx;

        if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1) {
                schedule_delayed_work(&perf->link_work, 2*HZ);
        } else {
                dev_dbg(&perf->ntb->pdev->dev, "link down\n");

                if (!perf->link_is_up)
                        cancel_delayed_work_sync(&perf->link_work);

                perf->link_is_up = false;
        }
}

static void perf_db_event(void *ctx, int vec)
{
        struct perf_ctx *perf = ctx;
        u64 db_bits, db_mask;

        db_mask = ntb_db_vector_mask(perf->ntb, vec);
        db_bits = ntb_db_read(perf->ntb);

        dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
                vec, db_mask, db_bits);
}

static const struct ntb_ctx_ops perf_ops = {
        .link_event = perf_link_event,
        .db_event = perf_db_event,
};

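/*
 * perf_copy() - push one buffer segment into the peer's memory window.
 * In PIO mode this is a plain memcpy_toio(); in DMA mode a memcpy
 * descriptor is prepared against the window's physical address, and the
 * completion counted back down in perf_copy_callback() via pctx->dma_sync.
 */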
static void perf_copy_callback(void *data)
{
        struct pthr_ctx *pctx = data;

        atomic_dec(&pctx->dma_sync);
}

static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
                         char *src, size_t size)
{
        struct perf_ctx *perf = pctx->perf;
        struct dma_async_tx_descriptor *txd;
        struct dma_chan *chan = pctx->dma_chan;
        struct dma_device *device;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        size_t src_off, dst_off;
        struct perf_mw *mw = &perf->mw;
        void __iomem *vbase;
        void __iomem *dst_vaddr;
        dma_addr_t dst_phys;
        int retries = 0;

        if (!use_dma) {
                memcpy_toio(dst, src, size);
                return size;
        }

        if (!chan) {
                dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
                return -EINVAL;
        }

        device = chan->device;
        src_off = (uintptr_t)src & ~PAGE_MASK;
        dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

        if (!is_dma_copy_aligned(device, src_off, dst_off, size))
                return -ENODEV;

        vbase = mw->vbase;
        dst_vaddr = dst;
        dst_phys = mw->phys_addr + (dst_vaddr - vbase);

        unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;

        unmap->len = size;
        unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
                                      src_off, size, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[0]))
                goto err_get_unmap;

        unmap->to_cnt = 1;

        do {
                txd = device->device_prep_dma_memcpy(chan, dst_phys,
                                                     unmap->addr[0],
                                                     size, DMA_PREP_INTERRUPT);
                if (!txd) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(DMA_OUT_RESOURCE_TO);
                }
        } while (!txd && (++retries < DMA_RETRIES));

        if (!txd) {
                pctx->dma_prep_err++;
                goto err_get_unmap;
        }

        txd->callback = perf_copy_callback;
        txd->callback_param = pctx;
        dma_set_unmap(txd, unmap);

        cookie = dmaengine_submit(txd);
        if (dma_submit_error(cookie))
                goto err_set_unmap;

        atomic_inc(&pctx->dma_sync);
        dma_async_issue_pending(chan);

        return size;

err_set_unmap:
        dmaengine_unmap_put(unmap);
err_get_unmap:
        dmaengine_unmap_put(unmap);
        return 0;
}

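/*
 * perf_move_data() - stream 'total' bytes through the window in 'buf_size'
 * chunks, wrapping back to the start of the window every 'win_size' bytes,
 * then record bytes copied and elapsed microseconds (i.e. MBytes/s) in the
 * thread context for debugfs_run_read() to report.
 */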
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
                          u64 buf_size, u64 win_size, u64 total)
{
        int chunks, total_chunks, i;
        int copied_chunks = 0;
        u64 copied = 0, result;
        char __iomem *tmp = dst;
        u64 perf, diff_us;
        ktime_t kstart, kstop, kdiff;
        unsigned long last_sleep = jiffies;

        chunks = div64_u64(win_size, buf_size);
        total_chunks = div64_u64(total, buf_size);
        kstart = ktime_get();

        for (i = 0; i < total_chunks; i++) {
                result = perf_copy(pctx, tmp, src, buf_size);
                copied += result;
                copied_chunks++;
                if (copied_chunks == chunks) {
                        tmp = dst;
                        copied_chunks = 0;
                } else
                        tmp += buf_size;

                /* Probably should schedule every 5s to prevent soft hang. */
                if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
                        last_sleep = jiffies;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(1);
                }

                if (unlikely(kthread_should_stop()))
                        break;
        }

        if (use_dma) {
                pr_debug("%s: All DMA descriptors submitted\n", current->comm);
                while (atomic_read(&pctx->dma_sync) != 0) {
                        if (kthread_should_stop())
                                break;
                        msleep(20);
                }
        }

        kstop = ktime_get();
        kdiff = ktime_sub(kstop, kstart);
        diff_us = ktime_to_us(kdiff);

        pr_debug("%s: copied %llu bytes\n", current->comm, copied);
        pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);

        perf = div64_u64(copied, diff_us);

        pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);

        pctx->copied = copied;
        pctx->diff_us = diff_us;

        return 0;
}

static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
        return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

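/*
 * ntb_perf_thread() - body of each measurement kthread: optionally acquire
 * a DMA channel on the local NUMA node, allocate source buffers, wait for
 * all sibling threads to be ready (perf->tsync), run perf_move_data(), then
 * park until debugfs_run_write() stops the thread and collects its status.
 */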
static int ntb_perf_thread(void *data)
{
        struct pthr_ctx *pctx = data;
        struct perf_ctx *perf = pctx->perf;
        struct pci_dev *pdev = perf->ntb->pdev;
        struct perf_mw *mw = &perf->mw;
        char __iomem *dst;
        u64 win_size, buf_size, total;
        void *src;
        int rc, node, i;
        struct dma_chan *dma_chan = NULL;

        pr_debug("kthread %s starting...\n", current->comm);

        node = dev_to_node(&pdev->dev);

        if (use_dma && !pctx->dma_chan) {
                dma_cap_mask_t dma_mask;

                dma_cap_zero(dma_mask);
                dma_cap_set(DMA_MEMCPY, dma_mask);
                dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
                                               (void *)(unsigned long)node);
                if (!dma_chan) {
                        pr_warn("%s: cannot acquire DMA channel, quitting\n",
                                current->comm);
                        return -ENODEV;
                }
                pctx->dma_chan = dma_chan;
        }

        for (i = 0; i < MAX_SRCS; i++) {
                pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
                if (!pctx->srcs[i]) {
                        rc = -ENOMEM;
                        goto err;
                }
        }

        win_size = mw->phys_size;
        buf_size = 1ULL << seg_order;
        total = 1ULL << run_order;

        if (buf_size > MAX_TEST_SIZE)
                buf_size = MAX_TEST_SIZE;

        dst = (char __iomem *)mw->vbase;

        atomic_inc(&perf->tsync);
        while (atomic_read(&perf->tsync) != perf->perf_threads)
                schedule();

        src = pctx->srcs[pctx->src_idx];
        pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

        rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

        atomic_dec(&perf->tsync);

        if (rc < 0) {
                pr_err("%s: failed\n", current->comm);
                rc = -ENXIO;
                goto err;
        }

        for (i = 0; i < MAX_SRCS; i++) {
                kfree(pctx->srcs[i]);
                pctx->srcs[i] = NULL;
        }

        atomic_inc(&perf->tdone);
        wake_up(pctx->wq);
        rc = 0;
        goto done;

err:
        for (i = 0; i < MAX_SRCS; i++) {
                kfree(pctx->srcs[i]);
                pctx->srcs[i] = NULL;
        }

        if (dma_chan) {
                dma_release_channel(dma_chan);
                pctx->dma_chan = NULL;
        }

done:
        /* Wait until we are told to stop */
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop())
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        return rc;
}

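/*
 * perf_free_mw()/perf_set_mw() - tear down and (re)build the inbound side
 * of memory window 0: allocate a DMA-coherent buffer of the negotiated size
 * and program it as the translation that the peer's writes land in.
 */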
static void perf_free_mw(struct perf_ctx *perf)
{
        struct perf_mw *mw = &perf->mw;
        struct pci_dev *pdev = perf->ntb->pdev;

        if (!mw->virt_addr)
                return;

        ntb_mw_clear_trans(perf->ntb, 0);
        dma_free_coherent(&pdev->dev, mw->buf_size,
                          mw->virt_addr, mw->dma_addr);
        mw->xlat_size = 0;
        mw->buf_size = 0;
        mw->virt_addr = NULL;
}

static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
        struct perf_mw *mw = &perf->mw;
        size_t xlat_size, buf_size;
        int rc;

        if (!size)
                return -EINVAL;

        xlat_size = round_up(size, mw->xlat_align_size);
        buf_size = round_up(size, mw->xlat_align);

        if (mw->xlat_size == xlat_size)
                return 0;

        if (mw->buf_size)
                perf_free_mw(perf);

        mw->xlat_size = xlat_size;
        mw->buf_size = buf_size;

        mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
                                           &mw->dma_addr, GFP_KERNEL);
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buf_size = 0;
                /* don't program a translation for a buffer we never got */
                return -ENOMEM;
        }

        rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
        if (rc) {
                dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
                perf_free_mw(perf);
                return -EIO;
        }

        return 0;
}

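/*
 * perf_link_work() - scratchpad handshake run after the link comes up:
 * advertise our memory-window size and PERF_VERSION to the peer, read the
 * peer's values back, program the local inbound window to the size the
 * peer advertised, and wake anyone waiting on link_wq.
 */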
static void perf_link_work(struct work_struct *work)
{
        struct perf_ctx *perf =
                container_of(work, struct perf_ctx, link_work.work);
        struct ntb_dev *ndev = perf->ntb;
        struct pci_dev *pdev = ndev->pdev;
        u32 val;
        u64 size;
        int rc;

        dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

        size = perf->mw.phys_size;

        if (max_mw_size && size > max_mw_size)
                size = max_mw_size;

        ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
        ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
        ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);

        /* now read what peer wrote */
        val = ntb_spad_read(ndev, VERSION);
        if (val != PERF_VERSION) {
                dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
                goto out;
        }

        val = ntb_spad_read(ndev, MW_SZ_HIGH);
        size = (u64)val << 32;

        val = ntb_spad_read(ndev, MW_SZ_LOW);
        size |= val;

        dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

        rc = perf_set_mw(perf, size);
        if (rc)
                goto out1;

        perf->link_is_up = true;
        wake_up(&perf->link_wq);

        return;

out1:
        perf_free_mw(perf);

out:
        if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&perf->link_work,
                                      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}

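/*
 * perf_setup_mw() - discover memory window 0's geometry and map its
 * outbound side write-combined so perf_copy() can write straight into
 * the peer.
 */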
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
        struct perf_mw *mw;
        int rc;

        mw = &perf->mw;

        rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size,
                              &mw->xlat_align, &mw->xlat_align_size);
        if (rc)
                return rc;

        perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
        if (!mw->vbase)
                return -ENOMEM;

        return 0;
}

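/*
 * debugfs_run_read() - report per-thread results: "running" while a test
 * still holds run_mutex, otherwise one line per finished thread with bytes
 * copied, elapsed microseconds and the resulting MBytes/s.
 */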
static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
                                size_t count, loff_t *offp)
{
        struct perf_ctx *perf = filp->private_data;
        char *buf;
        ssize_t ret, out_off = 0;
        struct pthr_ctx *pctx;
        int i;
        u64 rate;

        if (!perf)
                return 0;

        buf = kmalloc(1024, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (mutex_is_locked(&perf->run_mutex)) {
                out_off = scnprintf(buf, 64, "running\n");
                goto read_from_buf;
        }

        for (i = 0; i < MAX_THREADS; i++) {
                pctx = &perf->pthr_ctx[i];

                if (pctx->status == -ENODATA)
                        break;

                if (pctx->status) {
                        out_off += scnprintf(buf + out_off, 1024 - out_off,
                                             "%d: error %d\n", i,
                                             pctx->status);
                        continue;
                }

                rate = div64_u64(pctx->copied, pctx->diff_us);
                out_off += scnprintf(buf + out_off, 1024 - out_off,
                                     "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
                                     i, pctx->copied, pctx->diff_us, rate);
        }

read_from_buf:
        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
        kfree(buf);

        return ret;
}

static void threads_cleanup(struct perf_ctx *perf)
{
        struct pthr_ctx *pctx;
        int i;

        for (i = 0; i < MAX_THREADS; i++) {
                pctx = &perf->pthr_ctx[i];
                if (pctx->thread) {
                        pctx->status = kthread_stop(pctx->thread);
                        pctx->thread = NULL;
                }
        }
}

static void perf_clear_thread_status(struct perf_ctx *perf)
{
        int i;

        for (i = 0; i < MAX_THREADS; i++)
                perf->pthr_ctx[i].status = -ENODATA;
}

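/*
 * debugfs_run_write() - any write to the "run" file starts a measurement:
 * wait for the link handshake, clamp perf_threads/seg_order/run_order to
 * sane values, spawn one ntb_perf_thread per requested thread on the
 * device's NUMA node, wait for them to finish and reap their status.
 * From userspace (assuming debugfs is mounted at /sys/kernel/debug) a run
 * typically looks something like:
 *
 *      echo 8 > /sys/kernel/debug/ntb_perf/<pci-dev>/threads
 *      echo 1 > /sys/kernel/debug/ntb_perf/<pci-dev>/run
 *      cat /sys/kernel/debug/ntb_perf/<pci-dev>/run
 */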
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct perf_ctx *perf = filp->private_data;
        int node, i;
        DECLARE_WAIT_QUEUE_HEAD(wq);

        if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
                return -ENOLINK;

        if (perf->perf_threads == 0)
                return -EINVAL;

        if (!mutex_trylock(&perf->run_mutex))
                return -EBUSY;

        perf_clear_thread_status(perf);

        if (perf->perf_threads > MAX_THREADS) {
                perf->perf_threads = MAX_THREADS;
                pr_info("Reset total threads to: %u\n", MAX_THREADS);
        }

        /* no greater than 1M */
        if (seg_order > MAX_SEG_ORDER) {
                seg_order = MAX_SEG_ORDER;
                pr_info("Fix seg_order to %u\n", seg_order);
        }

        if (run_order < seg_order) {
                run_order = seg_order;
                pr_info("Fix run_order to %u\n", run_order);
        }

        node = dev_to_node(&perf->ntb->pdev->dev);
        atomic_set(&perf->tdone, 0);

        /* launch kernel thread */
        for (i = 0; i < perf->perf_threads; i++) {
                struct pthr_ctx *pctx;

                pctx = &perf->pthr_ctx[i];
                atomic_set(&pctx->dma_sync, 0);
                pctx->perf = perf;
                pctx->wq = &wq;
                pctx->thread =
                        kthread_create_on_node(ntb_perf_thread,
                                               (void *)pctx,
                                               node, "ntb_perf %d", i);
                if (IS_ERR(pctx->thread)) {
                        pctx->thread = NULL;
                        goto err;
                } else {
                        wake_up_process(pctx->thread);
                }
        }

        wait_event_interruptible(wq,
                atomic_read(&perf->tdone) == perf->perf_threads);

        threads_cleanup(perf);
        mutex_unlock(&perf->run_mutex);
        return count;

err:
        threads_cleanup(perf);
        mutex_unlock(&perf->run_mutex);
        return -ENXIO;
}

static const struct file_operations ntb_perf_debugfs_run = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = debugfs_run_read,
        .write = debugfs_run_write,
};

static int perf_debugfs_setup(struct perf_ctx *perf)
{
        struct pci_dev *pdev = perf->ntb->pdev;

        if (!debugfs_initialized())
                return -ENODEV;

        if (!perf_debugfs_dir) {
                perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
                if (!perf_debugfs_dir)
                        return -ENODEV;
        }

        perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
                                                    perf_debugfs_dir);
        if (!perf->debugfs_node_dir)
                return -ENODEV;

        perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
                                                perf->debugfs_node_dir, perf,
                                                &ntb_perf_debugfs_run);
        if (!perf->debugfs_run)
                return -ENODEV;

        perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
                                                  perf->debugfs_node_dir,
                                                  &perf->perf_threads);
        if (!perf->debugfs_threads)
                return -ENODEV;

        return 0;
}

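/*
 * perf_probe() - bind to an NTB device: allocate the driver context on the
 * device's NUMA node, map memory window 0, register the context ops,
 * enable the link and expose the debugfs interface.
 */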
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
        struct pci_dev *pdev = ntb->pdev;
        struct perf_ctx *perf;
        int node;
        int rc = 0;

        if (ntb_spad_count(ntb) < MAX_SPAD) {
                dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
                        DRIVER_NAME);
                return -EIO;
        }

        node = dev_to_node(&pdev->dev);

        perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
        if (!perf) {
                rc = -ENOMEM;
                goto err_perf;
        }

        perf->ntb = ntb;
        perf->perf_threads = 1;
        atomic_set(&perf->tsync, 0);
        mutex_init(&perf->run_mutex);
        spin_lock_init(&perf->db_lock);
        perf_setup_mw(ntb, perf);
        init_waitqueue_head(&perf->link_wq);
        INIT_DELAYED_WORK(&perf->link_work, perf_link_work);

        rc = ntb_set_ctx(ntb, perf, &perf_ops);
        if (rc)
                goto err_ctx;

        perf->link_is_up = false;
        ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
        ntb_link_event(ntb);

        rc = perf_debugfs_setup(perf);
        if (rc)
                goto err_ctx;

        perf_clear_thread_status(perf);

        return 0;

err_ctx:
        cancel_delayed_work_sync(&perf->link_work);
        kfree(perf);
err_perf:
        return rc;
}

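/*
 * perf_remove() - undo perf_probe(): serialize against a running test via
 * run_mutex, tear down the context and link, remove the debugfs entries
 * and release any DMA channels the worker threads acquired.
 */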
static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
        struct perf_ctx *perf = ntb->ctx;
        int i;

        dev_dbg(&perf->ntb->dev, "%s called\n", __func__);

        mutex_lock(&perf->run_mutex);

        cancel_delayed_work_sync(&perf->link_work);

        ntb_clear_ctx(ntb);
        ntb_link_disable(ntb);

        debugfs_remove_recursive(perf_debugfs_dir);
        perf_debugfs_dir = NULL;

        if (use_dma) {
                for (i = 0; i < MAX_THREADS; i++) {
                        struct pthr_ctx *pctx = &perf->pthr_ctx[i];

                        if (pctx->dma_chan)
                                dma_release_channel(pctx->dma_chan);
                }
        }

        kfree(perf);
}

static struct ntb_client perf_client = {
        .ops = {
                .probe = perf_probe,
                .remove = perf_remove,
        },
};
module_ntb_client(perf_client);