@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
 	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
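+/*
+ * Invoked directly under csk->lock, or deferred through csk->backlogq
+ * when another context owns the lock; the zero-length skb exists only
+ * to carry that deferred call and is consumed here. If the connection
+ * is no longer established there is nothing to abort, so the waiter is
+ * completed immediately with CPL_ERR_NONE.
+ */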
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	__kfree_skb(skb);
+
+	if (csk->com.state != CSK_STATE_ESTABLISHED)
+		goto no_abort;
+
+	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+	csk->com.state = CSK_STATE_ABORTING;
+
+	cxgbit_send_abort_req(csk);
+
+	return;
+
+no_abort:
+	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+	cxgbit_put_csk(csk);
+}
+
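+/*
+ * Abort the connection and block in cxgbit_wait_for_reply() for the
+ * completion that cxgbit_abort_rpl_rss() signals with the hardware
+ * status. A csk reference is held across the wait, and the skb is
+ * allocated with __GFP_NOFAIL so the abort cannot fail on allocation.
+ */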
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+	cxgbit_get_csk(csk);
+	cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+	spin_lock_bh(&csk->lock);
+	if (csk->lock_owner) {
+		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+		__skb_queue_tail(&csk->backlogq, skb);
+	} else {
+		__cxgbit_abort_conn(csk, skb);
+	}
+	spin_unlock_bh(&csk->lock);
+
+	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+			      csk->tid, 600, __func__);
+}
+
 void cxgbit_free_conn(struct iscsi_conn *conn)
 {
 	struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
 
 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
 	pr_debug("%s: csk %p; tid %u; state %d\n",
 		 __func__, csk, csk->tid, csk->com.state);
 
 	switch (csk->com.state) {
 	case CSK_STATE_ABORTING:
 		csk->com.state = CSK_STATE_DEAD;
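+		/*
+		 * If cxgbit_abort_conn() is blocked in
+		 * cxgbit_wait_for_reply(), wake it with the status
+		 * the hardware reported.
+		 */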
+		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+			cxgbit_wake_up(&csk->com.wr_wait, __func__,
+				       rpl->status);
 		cxgbit_put_csk(csk);
 		break;
 	default: