/* scif_epd.c */
  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2014 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * Intel SCIF driver.
  16. *
  17. */
  18. #include "scif_main.h"
  19. #include "scif_map.h"
  20. void scif_cleanup_ep_qp(struct scif_endpt *ep)
  21. {
  22. struct scif_qp *qp = ep->qp_info.qp;
  23. if (qp->outbound_q.rb_base) {
  24. scif_iounmap((void *)qp->outbound_q.rb_base,
  25. qp->outbound_q.size, ep->remote_dev);
  26. qp->outbound_q.rb_base = NULL;
  27. }
  28. if (qp->remote_qp) {
  29. scif_iounmap((void *)qp->remote_qp,
  30. sizeof(struct scif_qp), ep->remote_dev);
  31. qp->remote_qp = NULL;
  32. }
  33. if (qp->local_qp) {
  34. scif_unmap_single(qp->local_qp, ep->remote_dev,
  35. sizeof(struct scif_qp));
  36. qp->local_qp = 0x0;
  37. }
  38. if (qp->local_buf) {
  39. scif_unmap_single(qp->local_buf, ep->remote_dev,
  40. SCIF_ENDPT_QP_SIZE);
  41. qp->local_buf = 0;
  42. }
  43. }
  44. void scif_teardown_ep(void *endpt)
  45. {
  46. struct scif_endpt *ep = endpt;
  47. struct scif_qp *qp = ep->qp_info.qp;
  48. if (qp) {
  49. spin_lock(&ep->lock);
  50. scif_cleanup_ep_qp(ep);
  51. spin_unlock(&ep->lock);
  52. kfree(qp->inbound_q.rb_base);
  53. kfree(qp);
  54. }
  55. }
  56. /*
  57. * Enqueue the endpoint to the zombie list for cleanup.
  58. * The endpoint should not be accessed once this API returns.
  59. */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
	/*
	 * Take the global endpoint-list lock unless the caller already
	 * holds it; eplock_held avoids a self-deadlock in that case.
	 */
	if (!eplock_held)
		spin_lock(&scif_info.eplock);
	spin_lock(&ep->lock);
	ep->state = SCIFEP_ZOMBIE;
	spin_unlock(&ep->lock);
	list_add_tail(&ep->list, &scif_info.zombie);
	scif_info.nr_zombies++;
	if (!eplock_held)
		spin_unlock(&scif_info.eplock);
	/* Kick the misc work queue so the zombie list gets reaped */
	schedule_work(&scif_info.misc_work);
}
  73. static struct scif_endpt *scif_find_listen_ep(u16 port)
  74. {
  75. struct scif_endpt *ep = NULL;
  76. struct list_head *pos, *tmpq;
  77. spin_lock(&scif_info.eplock);
  78. list_for_each_safe(pos, tmpq, &scif_info.listen) {
  79. ep = list_entry(pos, struct scif_endpt, list);
  80. if (ep->port.port == port) {
  81. spin_lock(&ep->lock);
  82. spin_unlock(&scif_info.eplock);
  83. return ep;
  84. }
  85. }
  86. spin_unlock(&scif_info.eplock);
  87. return NULL;
  88. }
  89. void scif_cleanup_zombie_epd(void)
  90. {
  91. struct list_head *pos, *tmpq;
  92. struct scif_endpt *ep;
  93. spin_lock(&scif_info.eplock);
  94. list_for_each_safe(pos, tmpq, &scif_info.zombie) {
  95. ep = list_entry(pos, struct scif_endpt, list);
  96. list_del(pos);
  97. scif_info.nr_zombies--;
  98. kfree(ep);
  99. }
  100. spin_unlock(&scif_info.eplock);
  101. }
/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node. This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end point's queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
  116. void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
  117. {
  118. struct scif_endpt *ep = NULL;
  119. struct scif_conreq *conreq;
  120. conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
  121. if (!conreq)
  122. /* Lack of resources so reject the request. */
  123. goto conreq_sendrej;
  124. ep = scif_find_listen_ep(msg->dst.port);
  125. if (!ep)
  126. /* Send reject due to no listening ports */
  127. goto conreq_sendrej_free;
  128. if (ep->backlog <= ep->conreqcnt) {
  129. /* Send reject due to too many pending requests */
  130. spin_unlock(&ep->lock);
  131. goto conreq_sendrej_free;
  132. }
  133. conreq->msg = *msg;
  134. list_add_tail(&conreq->list, &ep->conlist);
  135. ep->conreqcnt++;
  136. wake_up_interruptible(&ep->conwq);
  137. spin_unlock(&ep->lock);
  138. return;
  139. conreq_sendrej_free:
  140. kfree(conreq);
  141. conreq_sendrej:
  142. msg->uop = SCIF_CNCT_REJ;
  143. scif_nodeqp_send(&scif_dev[msg->src.node], msg);
  144. }
  145. /**
  146. * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
  147. * @msg: Interrupt message
  148. *
  149. * An accept() on the remote node has occurred and sent this message
  150. * to indicate success. Place the end point in the MAPPING state and
  151. * save the remote nodes memory information. Then wake up the connect
  152. * request so it can finish.
  153. */
  154. void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
  155. {
  156. struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
  157. spin_lock(&ep->lock);
  158. if (SCIFEP_CONNECTING == ep->state) {
  159. ep->peer.node = msg->src.node;
  160. ep->peer.port = msg->src.port;
  161. ep->qp_info.gnt_pld = msg->payload[1];
  162. ep->remote_ep = msg->payload[2];
  163. ep->state = SCIFEP_MAPPING;
  164. wake_up(&ep->conwq);
  165. }
  166. spin_unlock(&ep->lock);
  167. }
  168. /**
  169. * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
  170. * @msg: Interrupt message
  171. *
  172. * The remote connection request has finished mapping the local memory.
  173. * Place the connection in the connected state and wake up the pending
  174. * accept() call.
  175. */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	/* connlock guards scif_info.connected; ep->lock guards ep state */
	mutex_lock(&scif_info.connlock);
	spin_lock(&ep->lock);
	/* New ep is now connected with all resources set. */
	ep->state = SCIFEP_CONNECTED;
	list_add_tail(&ep->list, &scif_info.connected);
	/* Wake the accept() blocked on conwq */
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
}
/**
 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote connection request failed to map the local memory it was sent.
 * Place the end point in the CLOSING state to indicate it and wake up
 * the pending accept().
 */
void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	/* Remote mapping failed: move to CLOSING and wake the accept() */
	ep->state = SCIFEP_CLOSING;
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
}
  204. /**
  205. * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
  206. * @msg: Interrupt message
  207. *
  208. * The remote end has rejected the connection request. Set the end
  209. * point back to the bound state and wake up the pending connect().
  210. */
  211. void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
  212. {
  213. struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
  214. spin_lock(&ep->lock);
  215. if (SCIFEP_CONNECTING == ep->state) {
  216. ep->state = SCIFEP_BOUND;
  217. wake_up(&ep->conwq);
  218. }
  219. spin_unlock(&ep->lock);
  220. }
  221. /**
  222. * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
  223. * @msg: Interrupt message
  224. *
  225. * The remote node has indicated close() has been called on its end
  226. * point. Remove the local end point from the connected list, set its
  227. * state to disconnected and ensure accesses to the remote node are
  228. * shutdown.
  229. *
  230. * When all accesses to the remote end have completed then send a
  231. * DISCNT_ACK to indicate it can remove its resources and complete
  232. * the close routine.
  233. */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		/*
		 * The local ep may have sent a disconnect and been closed
		 * due to a message response time out. It may have been
		 * allocated again and formed a new connection so we want to
		 * check if the remote ep matches as well.
		 */
		if (((u64)tmpep == msg->payload[1]) &&
		    ((u64)tmpep->remote_ep == msg->payload[0])) {
			/* Found it: unlink from connected, keep ep locked */
			list_del(pos);
			ep = tmpep;
			spin_lock(&ep->lock);
			break;
		}
	}
	/*
	 * If the terminated end is not found then this side started closing
	 * before the other side sent the disconnect. If so the ep will no
	 * longer be on the connected list. Regardless the other side
	 * needs to be acked to let it know close is complete.
	 */
	if (!ep) {
		mutex_unlock(&scif_info.connlock);
		goto discnct_ack;
	}
	/* Move to the disconnected list and unblock any sleeping I/O */
	ep->state = SCIFEP_DISCONNECTED;
	list_add_tail(&ep->list, &scif_info.disconnected);
	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
discnct_ack:
	msg->uop = SCIF_DISCNT_ACK;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}
/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * Remote side has indicated it has no more references to local resources
 */
void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_DISCONNECTED;
	spin_unlock(&ep->lock);
	/* Unblock the close() waiting on the disconnect handshake */
	complete(&ep->discon);
}
  290. /**
  291. * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
  292. * @msg: Interrupt message
  293. *
  294. * Remote side is confirming send or receive interrupt handling is complete.
  295. */
  296. void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
  297. {
  298. struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
  299. spin_lock(&ep->lock);
  300. if (SCIFEP_CONNECTED == ep->state)
  301. wake_up_interruptible(&ep->recvwq);
  302. spin_unlock(&ep->lock);
  303. }
  304. /**
  305. * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
  306. * @msg: Interrupt message
  307. *
  308. * Remote side is confirming send or receive interrupt handling is complete.
  309. */
  310. void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
  311. {
  312. struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
  313. spin_lock(&ep->lock);
  314. if (SCIFEP_CONNECTED == ep->state)
  315. wake_up_interruptible(&ep->sendwq);
  316. spin_unlock(&ep->lock);
  317. }