/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include "scif_map.h"

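/*
 * Undo the queue pair mappings of an endpoint: iounmap the remotely
 * mapped outbound ring buffer and remote queue pair, and release the
 * DMA mappings of the local queue pair and endpoint queue buffer.
 */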
void scif_cleanup_ep_qp(struct scif_endpt *ep)
{
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp->outbound_q.rb_base) {
		scif_iounmap((void *)qp->outbound_q.rb_base,
			     qp->outbound_q.size, ep->remote_dev);
		qp->outbound_q.rb_base = NULL;
	}
	if (qp->remote_qp) {
		scif_iounmap((void *)qp->remote_qp,
			     sizeof(struct scif_qp), ep->remote_dev);
		qp->remote_qp = NULL;
	}
	if (qp->local_qp) {
		scif_unmap_single(qp->local_qp, ep->remote_dev,
				  sizeof(struct scif_qp));
		qp->local_qp = 0x0;
	}
	if (qp->local_buf) {
		scif_unmap_single(qp->local_buf, ep->remote_dev,
				  SCIF_ENDPT_QP_SIZE);
		qp->local_buf = 0;
	}
}

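/*
 * Free the queue pair of an endpoint: clean up its mappings under the
 * endpoint lock, then release the inbound ring buffer and the queue
 * pair structure itself.
 */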
void scif_teardown_ep(void *endpt)
{
	struct scif_endpt *ep = endpt;
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp) {
		spin_lock(&ep->lock);
		scif_cleanup_ep_qp(ep);
		spin_unlock(&ep->lock);
		kfree(qp->inbound_q.rb_base);
		kfree(qp);
	}
}

/*
 * Enqueue the endpoint to the zombie list for cleanup.
 * The endpoint should not be accessed once this API returns.
 */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
	if (!eplock_held)
		mutex_lock(&scif_info.eplock);
	spin_lock(&ep->lock);
	ep->state = SCIFEP_ZOMBIE;
	spin_unlock(&ep->lock);
	list_add_tail(&ep->list, &scif_info.zombie);
	scif_info.nr_zombies++;
	if (!eplock_held)
		mutex_unlock(&scif_info.eplock);
	schedule_work(&scif_info.misc_work);
}

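/*
 * Look up the endpoint listening on @port. Returns the endpoint with
 * scif_info.eplock dropped, or NULL if no listening endpoint is bound
 * to that port.
 */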
static struct scif_endpt *scif_find_listen_ep(u16 port)
{
	struct scif_endpt *ep = NULL;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.listen) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->port.port == port) {
			mutex_unlock(&scif_info.eplock);
			return ep;
		}
	}
	mutex_unlock(&scif_info.eplock);
	return NULL;
}

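/*
 * Free endpoints on the zombie list whose RMA resources can now be
 * released, as reported by scif_rma_ep_can_uninit(). Endpoints that
 * still hold RMA references remain on the list for a later pass.
 */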
void scif_cleanup_zombie_epd(void)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;

	mutex_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.zombie) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (scif_rma_ep_can_uninit(ep)) {
			list_del(pos);
			scif_info.nr_zombies--;
			put_iova_domain(&ep->rma_info.iovad);
			kfree(ep);
		}
	}
	mutex_unlock(&scif_info.eplock);
}

/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node. This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end point's queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_conreq *conreq;

	conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
	if (!conreq)
		/* Lack of resources so reject the request. */
		goto conreq_sendrej;

	ep = scif_find_listen_ep(msg->dst.port);
	if (!ep)
		/* Send reject due to no listening ports */
		goto conreq_sendrej_free;
	else
		spin_lock(&ep->lock);

	if (ep->backlog <= ep->conreqcnt) {
		/* Send reject due to too many pending requests */
		spin_unlock(&ep->lock);
		goto conreq_sendrej_free;
	}

	conreq->msg = *msg;
	list_add_tail(&conreq->list, &ep->conlist);
	ep->conreqcnt++;
	wake_up_interruptible(&ep->conwq);
	spin_unlock(&ep->lock);
	return;

conreq_sendrej_free:
	kfree(conreq);
conreq_sendrej:
	msg->uop = SCIF_CNCT_REJ;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * An accept() on the remote node has occurred and sent this message
 * to indicate success. Place the end point in the MAPPING state and
 * save the remote node's memory information. Then wake up the connect
 * request so it can finish.
 */
void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTING == ep->state) {
		ep->peer.node = msg->src.node;
		ep->peer.port = msg->src.port;
		ep->qp_info.gnt_pld = msg->payload[1];
		ep->remote_ep = msg->payload[2];
		ep->state = SCIFEP_MAPPING;
		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote connection request has finished mapping the local memory.
 * Place the connection in the connected state and wake up the pending
 * accept() call.
 */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	mutex_lock(&scif_info.connlock);
	spin_lock(&ep->lock);
	/* New ep is now connected with all resources set. */
	ep->state = SCIFEP_CONNECTED;
	list_add_tail(&ep->list, &scif_info.connected);
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
}

/**
 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote connection request failed to map the local memory it was sent.
 * Place the end point in the CLOSING state to indicate this and wake up
 * the pending accept().
 */
void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_CLOSING;
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote end has rejected the connection request. Set the end
 * point back to the bound state and wake up the pending connect().
 */
void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTING == ep->state) {
		ep->state = SCIFEP_BOUND;
		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote node has indicated close() has been called on its end
 * point. Remove the local end point from the connected list, set its
 * state to disconnected and ensure accesses to the remote node are
 * shut down.
 *
 * When all accesses to the remote end have completed then send a
 * DISCNT_ACK to indicate it can remove its resources and complete
 * the close routine.
 */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		/*
		 * The local ep may have sent a disconnect and been closed
		 * due to a message response time out. It may have been
		 * allocated again and formed a new connection so we want to
		 * check if the remote ep matches.
		 */
		if (((u64)tmpep == msg->payload[1]) &&
		    ((u64)tmpep->remote_ep == msg->payload[0])) {
			list_del(pos);
			ep = tmpep;
			spin_lock(&ep->lock);
			break;
		}
	}

	/*
	 * If the terminated end is not found then this side started closing
	 * before the other side sent the disconnect. If so the ep will no
	 * longer be on the connected list. Regardless, the other side
	 * needs to be acked to let it know close is complete.
	 */
	if (!ep) {
		mutex_unlock(&scif_info.connlock);
		goto discnct_ack;
	}

	ep->state = SCIFEP_DISCONNECTED;
	list_add_tail(&ep->list, &scif_info.disconnected);

	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);

discnct_ack:
	msg->uop = SCIF_DISCNT_ACK;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote side has indicated it has no more references to local resources.
 */
void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_DISCONNECTED;
	spin_unlock(&ep->lock);
	complete(&ep->discon);
}

/**
 * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * Remote side is confirming send or receive interrupt handling is complete.
 */
void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTED == ep->state)
		wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * Remote side is confirming send or receive interrupt handling is complete.
 */
void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTED == ep->state)
		wake_up_interruptible(&ep->sendwq);
	spin_unlock(&ep->lock);
}