scif_fence.c
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
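
/*
 * Overview of the fence message flows handled in this file:
 *
 * SCIF_MARK / SCIF_MARK_(N)ACK: the peer asks this node to insert a DMA
 * mark on its channel; the reply carries the mark (a DMA cookie) or a NACK.
 *
 * SCIF_WAIT / SCIF_WAIT_(N)ACK: the peer asks this node to wait for one of
 * its marks to complete; the wait can sleep and is therefore deferred to
 * the misc work thread.
 *
 * SCIF_SIG_LOCAL / SCIF_SIG_REMOTE / SCIF_SIG_(N)ACK: the peer asks this
 * node to write a 64 bit value to a registered offset once prior DMAs
 * have drained.
 */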

/**
 * scif_recv_mark: Handle SCIF_MARK request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested a mark.
 */
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int mark = 0;
	int err;

	err = _scif_fence_mark(ep, &mark);
	if (err)
		msg->uop = SCIF_MARK_NACK;
	else
		msg->uop = SCIF_MARK_ACK;
	msg->payload[0] = ep->remote_ep;
	msg->payload[2] = mark;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_MARK message.
 */
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_MARK_ACK) {
		fence_req->state = OP_COMPLETED;
		fence_req->dma_mark = (int)msg->payload[2];
	} else {
		fence_req->state = OP_FAILED;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_wait: Handle SCIF_WAIT request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested waiting on a fence. The wait can sleep, so it is
 * queued up for the misc work thread (see scif_rma_handle_remote_fences())
 * rather than performed here.
 */
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_remote_fence_info *fence;

	/*
	 * Allocate structure for remote fence information and
	 * send a NACK if the allocation failed. The peer will
	 * return ENOMEM upon receiving a NACK.
	 */
	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		msg->payload[0] = ep->remote_ep;
		msg->uop = SCIF_WAIT_NACK;
		scif_nodeqp_send(ep->remote_dev, msg);
		return;
	}

	/* Prepare the fence request */
	memcpy(&fence->msg, msg, sizeof(struct scifmsg));
	INIT_LIST_HEAD(&fence->list);

	/* Insert to the global remote fence request list */
	mutex_lock(&scif_info.fencelock);
	atomic_inc(&ep->rma_info.fence_refcount);
	list_add_tail(&fence->list, &scif_info.fence);
	mutex_unlock(&scif_info.fencelock);

	schedule_work(&scif_info.misc_work);
}

/**
 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_WAIT message.
 */
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_WAIT_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a local offset.
 */
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_SELF);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a remote offset.
 */
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_PEER);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has responded to a signal request.
 */
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[3];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_SIG_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/* Return the kernel virtual address backing offset @off within @window */
static inline void *scif_get_local_va(off_t off, struct scif_window *window)
{
	struct page **pages = window->pinned_pages->pages;
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;

	return page_address(pages[page_nr]) + page_off;
}

/*
 * DMA completion callback: free the scif_status buffer that staged the
 * signal value.
 */
static void scif_prog_signal_cb(void *arg)
{
	struct scif_status *status = arg;

	dma_pool_free(status->ep->remote_dev->signal_pool, status,
		      status->src_dma_addr);
}
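
/*
 * _scif_prog_signal() arranges for @val to be written to DMA address @dst
 * only after all prior transfers on the channel have completed. Two
 * descriptors are used: a zero-byte memcpy with DMA_PREP_FENCE orders the
 * write behind earlier transfers, then the write itself follows. On X100
 * the hardware status descriptor writes the immediate value; otherwise the
 * value is staged in a scif_status buffer from signal_pool and copied over,
 * with scif_prog_signal_cb() freeing the buffer on completion.
 */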
static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
	struct dma_async_tx_descriptor *tx;
	struct scif_status *status = NULL;
	dma_addr_t src;
	dma_cookie_t cookie;
	int err;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	dma_async_issue_pending(chan);
	if (x100) {
		/*
		 * For X100 use the status descriptor to write the value to
		 * the destination.
		 */
		tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
	} else {
		status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
					&src);
		if (!status) {
			err = -ENOMEM;
			dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
			goto alloc_fail;
		}
		status->val = val;
		status->src_dma_addr = src;
		status->ep = ep;
		src += offsetof(struct scif_status, val);
		tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
						  DMA_PREP_INTERRUPT);
	}
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	if (!x100) {
		tx->callback = scif_prog_signal_cb;
		tx->callback_param = status;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = -EIO;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	dma_async_issue_pending(chan);
	return 0;
dma_fail:
	if (!x100)
		dma_pool_free(ep->remote_dev->signal_pool, status,
			      status->src_dma_addr);
alloc_fail:
	return err;
}

/*
 * scif_prog_signal:
 * @epd - Endpoint Descriptor
 * @offset - registered address to write @val to
 * @val - Value to be written at @offset
 * @type - Type of the window.
 *
 * Arrange to write a value to the registered offset after ensuring that the
 * offset provided is indeed valid.
 */
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
		     enum scif_window_type type)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_window *window = NULL;
	struct scif_rma_req req;
	dma_addr_t dst_dma_addr;
	int err;

	mutex_lock(&ep->rma_info.rma_lock);
	req.out_window = &window;
	req.offset = offset;
	req.nr_bytes = sizeof(u64);
	req.prot = SCIF_PROT_WRITE;
	req.type = SCIF_WINDOW_SINGLE;
	if (type == SCIF_WINDOW_SELF)
		req.head = &ep->rma_info.reg_list;
	else
		req.head = &ep->rma_info.remote_reg_list;
	/* Does a valid window exist? */
	err = scif_query_window(&req);
	if (err) {
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto unlock_ret;
	}

	if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
		u64 *dst_virt;

		/* Self loopback on the mgmt node: write via the CPU */
		if (type == SCIF_WINDOW_SELF)
			dst_virt = scif_get_local_va(offset, window);
		else
			dst_virt =
			scif_get_local_va(offset, (struct scif_window *)
					  window->peer_window);
		*dst_virt = val;
	} else {
		dst_dma_addr = __scif_off_to_dma_addr(window, offset);
		err = _scif_prog_signal(epd, dst_dma_addr, val);
	}
unlock_ret:
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}
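
/*
 * A fence mark is the DMA cookie of an interrupt descriptor submitted
 * behind a fenced zero-byte transfer (see _scif_fence_mark()), so waiting
 * on a mark is simply waiting for that cookie to reach DMA_COMPLETE. Marks
 * obtained from the peer have the SCIF_REMOTE_FENCE bit set, which is
 * stripped before the cookie is used.
 */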
static int _scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
	int err;

	/* Wait for DMA callback in scif_fence_mark_cb(..) */
	err = wait_event_interruptible_timeout(ep->rma_info.markwq,
					       dma_async_is_tx_complete(
					       ep->rma_info.dma_chan,
					       cookie, NULL, NULL) ==
					       DMA_COMPLETE,
					       SCIF_NODE_ALIVE_TIMEOUT);
	if (!err)
		err = -ETIMEDOUT;
	else if (err > 0)
		err = 0;
	return err;
}

/**
 * scif_rma_handle_remote_fences:
 *
 * This routine services remote fence requests. It runs from the misc work
 * thread because waiting on a fence can sleep, which is not allowed in the
 * node queue message handlers.
 */
void scif_rma_handle_remote_fences(void)
{
	struct list_head *item, *tmp;
	struct scif_remote_fence_info *fence;
	struct scif_endpt *ep;
	int mark, err;

	might_sleep();
	mutex_lock(&scif_info.fencelock);
	list_for_each_safe(item, tmp, &scif_info.fence) {
		fence = list_entry(item, struct scif_remote_fence_info,
				   list);
		/* Remove fence from global list */
		list_del(&fence->list);

		/* Initiate the fence operation */
		ep = (struct scif_endpt *)fence->msg.payload[0];
		mark = fence->msg.payload[2];
		err = _scif_fence_wait(ep, mark);
		if (err)
			fence->msg.uop = SCIF_WAIT_NACK;
		else
			fence->msg.uop = SCIF_WAIT_ACK;
		fence->msg.payload[0] = ep->remote_ep;
		scif_nodeqp_send(ep->remote_dev, &fence->msg);
		kfree(fence);
		if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
			schedule_work(&scif_info.misc_work);
	}
	mutex_unlock(&scif_info.fencelock);
}
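
/*
 * _scif_send_fence() below implements the initiator side of the remote
 * mark/wait protocol: allocate a scif_fence_info tracking structure, send
 * SCIF_MARK or SCIF_WAIT over the node queue, then wait for the (N)ACK
 * with a timeout, retrying for as long as the remote node is alive so that
 * a dead node surfaces as -ENODEV instead of a hang.
 */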
static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
{
	int err;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}
	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);

	msg.src = ep->port;
	msg.uop = uop;
	msg.payload[0] = ep->remote_ep;
	msg.payload[1] = (u64)fence_req;
	if (uop == SCIF_WAIT)
		msg.payload[2] = mark;
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED)
		err = scif_nodeqp_send(ep->remote_dev, &msg);
	else
		err = -ENOTCONN;
	spin_unlock(&ep->lock);
	if (err)
		goto error_free;
retry:
	/* Wait for a SCIF_WAIT_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	mutex_lock(&ep->rma_info.rma_lock);
	if (err < 0) {
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENOMEM;
	if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
		*out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
	mutex_unlock(&ep->rma_info.rma_lock);
error_free:
	kfree(fence_req);
error:
	return err;
}

/**
 * scif_send_fence_mark:
 * @epd: end point descriptor.
 * @out_mark: Output DMA mark reported by peer.
 *
 * Send a remote fence mark request.
 */
static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
{
	return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
}

/**
 * scif_send_fence_wait:
 * @epd: end point descriptor.
 * @mark: DMA mark to wait for.
 *
 * Send a remote fence wait request.
 */
static int scif_send_fence_wait(scif_epd_t epd, int mark)
{
	return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
}

static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
					struct scif_fence_info *fence_req)
{
	int err;

retry:
	/* Wait for a SCIF_SIG_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	if (err < 0) {
		mutex_lock(&ep->rma_info.rma_lock);
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
		mutex_unlock(&ep->rma_info.rma_lock);
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENXIO;
	return err;
}

/**
 * scif_send_fence_signal:
 * @epd - endpoint descriptor
 * @roff - remote offset
 * @rval - value to write at @roff
 * @loff - local offset
 * @lval - value to write at @loff
 * @flags - flags
 *
 * Sends a remote fence signal request
 */
static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
				  off_t loff, u64 lval, int flags)
{
	int err = 0;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}
	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);
	msg.src = ep->port;
	if (flags & SCIF_SIGNAL_LOCAL) {
		msg.uop = SCIF_SIG_LOCAL;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = roff;
		msg.payload[2] = rval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
		if (err)
			goto error_free;
	}
	fence_req->state = OP_IN_PROGRESS;
	if (flags & SCIF_SIGNAL_REMOTE) {
		msg.uop = SCIF_SIG_REMOTE;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = loff;
		msg.payload[2] = lval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
	}
error_free:
	kfree(fence_req);
error:
	return err;
}

static void scif_fence_mark_cb(void *arg)
{
	struct scif_endpt *ep = (struct scif_endpt *)arg;

	/* The interrupt descriptor for the mark has completed */
	wake_up_interruptible(&ep->rma_info.markwq);
	atomic_dec(&ep->rma_info.fence_refcount);
}

/*
 * _scif_fence_mark:
 * @epd - endpoint descriptor
 * @mark - output DMA mark
 *
 * Set up a mark for this endpoint and return the value of the mark. The
 * mark is the DMA cookie of an interrupt descriptor submitted behind a
 * fenced zero-byte transfer, so it completes only after all prior DMAs on
 * the channel have drained.
 */
int _scif_fence_mark(scif_epd_t epd, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int err;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	dma_async_issue_pending(chan);
	tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	tx->callback = scif_fence_mark_cb;
	tx->callback_param = ep;
	*mark = cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	atomic_inc(&ep->rma_info.fence_refcount);
	dma_async_issue_pending(chan);
	return 0;
}

#define SCIF_LOOPB_MAGIC_MARK 0xdead

int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
		ep, flags, *mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/*
	 * Management node loopback does not need to use DMA.
	 * Return a valid mark to be symmetric.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		*mark = SCIF_LOOPB_MAGIC_MARK;
		return 0;
	}

	if (flags & SCIF_FENCE_INIT_SELF)
		err = _scif_fence_mark(epd, mark);
	else
		err = scif_send_fence_mark(ep, mark);
	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
		ep, flags, *mark, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_mark);

int scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_wait: ep %p mark 0x%x\n",
		ep, mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;
	/*
	 * Management node loopback does not need to use DMA.
	 * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK,
	 * so simply return success if the mark matches.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		if (mark == SCIF_LOOPB_MAGIC_MARK)
			return 0;
		else
			return -EINVAL;
	}
	if (mark & SCIF_REMOTE_FENCE)
		err = scif_send_fence_wait(epd, mark);
	else
		err = _scif_fence_wait(epd, mark);
	if (err < 0)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_wait);
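
/*
 * Typical mark/wait usage by a SCIF client (a sketch, assuming a connected
 * endpoint with outstanding RMAs initiated by this endpoint):
 *
 *	int mark, err;
 *
 *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
 *	if (!err)
 *		err = scif_fence_wait(epd, mark);
 *
 * On success all RMAs issued on the endpoint before the mark have
 * completed.
 */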

int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
		      off_t roff, u64 rval, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
		ep, loff, lval, roff, rval, flags);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
			SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
	if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
		return -EINVAL;

	/* Only Dword aligned offsets allowed */
	if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
		return -EINVAL;

	/* Only Dword aligned offsets allowed */
	if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
		return -EINVAL;

	if (flags & SCIF_FENCE_INIT_PEER) {
		err = scif_send_fence_signal(epd, roff, rval, loff,
					     lval, flags);
	} else {
		/* Local Signal in Local RAS */
		if (flags & SCIF_SIGNAL_LOCAL) {
			err = scif_prog_signal(epd, loff, lval,
					       SCIF_WINDOW_SELF);
			if (err)
				goto error_ret;
		}

		/* Signal in Remote RAS */
		if (flags & SCIF_SIGNAL_REMOTE)
			err = scif_prog_signal(epd, roff,
					       rval, SCIF_WINDOW_PEER);
	}
error_ret:
	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_signal);
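
/*
 * Typical signal usage (a sketch, assuming loff and roff name u64-sized,
 * dword-aligned locations inside windows registered earlier via
 * scif_register()):
 *
 *	err = scif_fence_signal(epd, loff, 1, roff, 1,
 *				SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL |
 *				SCIF_SIGNAL_REMOTE);
 *
 * Once the RMAs initiated by this endpoint drain, 1 is written both to
 * loff in the local registered address space and to roff in the peer's,
 * so either side can poll for completion.
 */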