@@ -78,7 +78,7 @@ static const char *link_unk_evt = "Unknown link event ";
 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				 struct sk_buff **buf);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
 static int link_send_sections_long(struct tipc_port *sender,
@@ -147,11 +147,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
 /**
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
- *
- * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
- * with tipc_link_delete(). (There is no risk that the node will be deleted by
- * another thread because tipc_link_delete() always cancels the link timer before
- * tipc_node_delete() is called.)
  */
 static void link_timeout(struct tipc_link *l_ptr)
 {
@@ -213,8 +208,8 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
  * Returns pointer to link.
  */
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-				     struct tipc_bearer *b_ptr,
-				     const struct tipc_media_addr *media_addr)
+				   struct tipc_bearer *b_ptr,
+				   const struct tipc_media_addr *media_addr)
 {
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
@@ -279,41 +274,43 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);
-	list_add_tail(&l_ptr->link_list, &b_ptr->links);
 
	link_state_event(l_ptr, STARTING_EVT);
 
	return l_ptr;
 }
 
-/**
- * tipc_link_delete - delete a link
- * @l_ptr: pointer to link
- *
- * Note: 'tipc_net_lock' is write_locked, bearer is locked.
- * This routine must not grab the node lock until after link timer cancellation
- * to avoid a potential deadlock situation.
- */
-void tipc_link_delete(struct tipc_link *l_ptr)
-{
-	if (!l_ptr) {
-		pr_err("Attempt to delete non-existent link\n");
-		return;
-	}
 
-	k_cancel_timer(&l_ptr->timer);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
+{
+	struct tipc_link *l_ptr;
+	struct tipc_node *n_ptr;
 
-	tipc_node_lock(l_ptr->owner);
-	tipc_link_reset(l_ptr);
-	tipc_node_detach_link(l_ptr->owner, l_ptr);
-	tipc_link_purge_queues(l_ptr);
-	list_del_init(&l_ptr->link_list);
-	tipc_node_unlock(l_ptr->owner);
-	k_term_timer(&l_ptr->timer);
-	kfree(l_ptr);
+	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+		spin_lock_bh(&n_ptr->lock);
+		l_ptr = n_ptr->links[bearer_id];
+		if (l_ptr) {
+			tipc_link_reset(l_ptr);
+			if (shutting_down || !tipc_node_is_up(n_ptr)) {
+				tipc_node_detach_link(l_ptr->owner, l_ptr);
+				tipc_link_reset_fragments(l_ptr);
+				spin_unlock_bh(&n_ptr->lock);
+
+				/* Nobody else can access this link now: */
+				del_timer_sync(&l_ptr->timer);
+				kfree(l_ptr);
+			} else {
+				/* Detach/delete when failover is finished: */
+				l_ptr->flags |= LINK_STOPPED;
+				spin_unlock_bh(&n_ptr->lock);
+				del_timer_sync(&l_ptr->timer);
+			}
+			continue;
+		}
+		spin_unlock_bh(&n_ptr->lock);
+	}
 }
 
-
 /**
  * link_schedule_port - schedule port for deferred sending
  * @l_ptr: pointer to link
@@ -461,6 +458,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
	link_reset_statistics(l_ptr);
 }
 
+void tipc_link_reset_list(unsigned int bearer_id)
+{
+	struct tipc_link *l_ptr;
+	struct tipc_node *n_ptr;
+
+	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+		spin_lock_bh(&n_ptr->lock);
+		l_ptr = n_ptr->links[bearer_id];
+		if (l_ptr)
+			tipc_link_reset(l_ptr);
+		spin_unlock_bh(&n_ptr->lock);
+	}
+}
 
 static void link_activate(struct tipc_link *l_ptr)
 {
@@ -479,7 +489,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;
 
-	if (!l_ptr->started && (event != STARTING_EVT))
+	if (l_ptr->flags & LINK_STOPPED)
+		return;
+
+	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return; /* Not yet. */
 
	/* Check whether changeover is going on */
@@ -605,7 +618,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
-			l_ptr->started = 1;
+			l_ptr->flags |= LINK_STARTED;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
@@ -1435,7 +1448,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
-		int type;
 
		head = head->next;
		buf->next = NULL;
@@ -1483,7 +1495,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
-			msg_type(msg) == ACTIVATE_MSG) &&
+		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;
 
@@ -1502,7 +1514,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;
-
			kfree_skb(crs);
			crs = next;
			released++;
@@ -1515,15 +1526,17 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
+
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
+
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
-			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, 0, 0, 0, 0);
		}
 
-		/* Now (finally!) process the incoming message */
-protocol_check:
+		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				link_recv_proto_msg(l_ptr, buf);
@@ -1555,14 +1568,40 @@ protocol_check:
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);
-deliver:
-		if (likely(msg_isdata(msg))) {
+
+		/* Deliver packet/message to correct user: */
+		if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
+			if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
+				tipc_node_unlock(n_ptr);
+				continue;
+			}
+			msg = buf_msg(buf);
+		} else if (msg_user(msg) == MSG_FRAGMENTER) {
+			int rc;
+
+			l_ptr->stats.recv_fragments++;
+			rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
+						&l_ptr->reasm_tail,
+						&buf);
+			if (rc == LINK_REASM_COMPLETE) {
+				l_ptr->stats.recv_fragmented++;
+				msg = buf_msg(buf);
+			} else {
+				if (rc == LINK_REASM_ERROR)
+					tipc_link_reset(l_ptr);
+				tipc_node_unlock(n_ptr);
+				continue;
+			}
+		}
+
+		switch (msg_user(msg)) {
+		case TIPC_LOW_IMPORTANCE:
+		case TIPC_MEDIUM_IMPORTANCE:
+		case TIPC_HIGH_IMPORTANCE:
+		case TIPC_CRITICAL_IMPORTANCE:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_msg(buf);
			continue;
-		}
-		switch (msg_user(msg)) {
-			int ret;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
@@ -1574,48 +1613,20 @@ deliver:
			tipc_node_unlock(n_ptr);
			tipc_named_recv(buf);
			continue;
-		case BCAST_PROTOCOL:
-			tipc_link_recv_sync(n_ptr, buf);
-			tipc_node_unlock(n_ptr);
-			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_proto_msg(buf);
			continue;
-		case MSG_FRAGMENTER:
-			l_ptr->stats.recv_fragments++;
-			ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
-						      &l_ptr->reasm_tail,
-						      &buf);
-			if (ret == LINK_REASM_COMPLETE) {
-				l_ptr->stats.recv_fragmented++;
-				msg = buf_msg(buf);
-				goto deliver;
-			}
-			if (ret == LINK_REASM_ERROR)
-				tipc_link_reset(l_ptr);
-			tipc_node_unlock(n_ptr);
-			continue;
-		case CHANGEOVER_PROTOCOL:
-			type = msg_type(msg);
-			if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
-				msg = buf_msg(buf);
-				seq_no = msg_seqno(msg);
-				if (type == ORIGINAL_MSG)
-					goto deliver;
-				goto protocol_check;
-			}
+		case BCAST_PROTOCOL:
+			tipc_link_recv_sync(n_ptr, buf);
			break;
		default:
			kfree_skb(buf);
-			buf = NULL;
			break;
		}
		tipc_node_unlock(n_ptr);
-		tipc_net_route_msg(buf);
		continue;
 unlock_discard:
-
		tipc_node_unlock(n_ptr);
 discard:
		kfree_skb(buf);
@@ -2105,83 +2116,108 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
	return eb;
 }
 
-/* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
- * via other link as result of a failover (ORIGINAL_MSG) or
- * a new active link (DUPLICATE_MSG). Failover packets are
- * returned to the active link for delivery upwards.
+
+
+/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
+ * Owner node is locked.
+ */
+static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
+			      struct sk_buff *t_buf)
+{
+	struct sk_buff *buf;
+
+	if (!tipc_link_is_up(l_ptr))
+		return;
+
+	buf = buf_extract(t_buf, INT_H_SIZE);
+	if (buf == NULL) {
+		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
+		return;
+	}
+
+	/* Add buffer to deferred queue, if applicable: */
+	link_handle_out_of_seq_msg(l_ptr, buf);
+}
+
+/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
  * Owner node is locked.
  */
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
-				struct sk_buff **buf)
+static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
+					      struct sk_buff *t_buf)
 {
-	struct sk_buff *tunnel_buf = *buf;
-	struct tipc_link *dest_link;
+	struct tipc_msg *t_msg = buf_msg(t_buf);
+	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;
-	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
-	u32 msg_typ = msg_type(tunnel_msg);
-	u32 msg_count = msg_msgcnt(tunnel_msg);
-	u32 bearer_id = msg_bearer_id(tunnel_msg);
 
-	if (bearer_id >= MAX_BEARERS)
-		goto exit;
-	dest_link = (*l_ptr)->owner->links[bearer_id];
-	if (!dest_link)
-		goto exit;
-	if (dest_link == *l_ptr) {
-		pr_err("Unexpected changeover message on link <%s>\n",
-		       (*l_ptr)->name);
-		goto exit;
-	}
-	*l_ptr = dest_link;
-	msg = msg_get_wrapped(tunnel_msg);
+	if (tipc_link_is_up(l_ptr))
+		tipc_link_reset(l_ptr);
 
-	if (msg_typ == DUPLICATE_MSG) {
-		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
-			goto exit;
-		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
-		if (*buf == NULL) {
-			pr_warn("%sduplicate msg dropped\n", link_co_err);
+	/* First failover packet? */
+	if (l_ptr->exp_msg_count == START_CHANGEOVER)
+		l_ptr->exp_msg_count = msg_msgcnt(t_msg);
+
+	/* Should there be an inner packet? */
+	if (l_ptr->exp_msg_count) {
+		l_ptr->exp_msg_count--;
+		buf = buf_extract(t_buf, INT_H_SIZE);
+		if (buf == NULL) {
+			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
-		kfree_skb(tunnel_buf);
-		return 1;
-	}
+		msg = buf_msg(buf);
 
-	/* First original message ?: */
-	if (tipc_link_is_up(dest_link)) {
-		pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
-			dest_link->name);
-		tipc_link_reset(dest_link);
-		dest_link->exp_msg_count = msg_count;
-		if (!msg_count)
-			goto exit;
-	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
-		dest_link->exp_msg_count = msg_count;
-		if (!msg_count)
+		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
+			kfree_skb(buf);
+			buf = NULL;
			goto exit;
+		}
+		if (msg_user(msg) == MSG_FRAGMENTER) {
+			l_ptr->stats.recv_fragments++;
+			tipc_link_frag_rcv(&l_ptr->reasm_head,
+					   &l_ptr->reasm_tail,
+					   &buf);
+		}
+	}
+exit:
+	if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
+		tipc_node_detach_link(l_ptr->owner, l_ptr);
+		kfree(l_ptr);
	}
+	return buf;
+}
+
+/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
+ * via other link as result of a failover (ORIGINAL_MSG) or
+ * a new active link (DUPLICATE_MSG). Failover packets are
+ * returned to the active link for delivery upwards.
+ * Owner node is locked.
+ */
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+				struct sk_buff **buf)
+{
+	struct sk_buff *t_buf = *buf;
+	struct tipc_link *l_ptr;
+	struct tipc_msg *t_msg = buf_msg(t_buf);
+	u32 bearer_id = msg_bearer_id(t_msg);
 
-	/* Receive original message */
-	if (dest_link->exp_msg_count == 0) {
-		pr_warn("%sgot too many tunnelled messages\n", link_co_err);
+	*buf = NULL;
+
+	if (bearer_id >= MAX_BEARERS)
		goto exit;
-	}
-	dest_link->exp_msg_count--;
-	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+
+	l_ptr = n_ptr->links[bearer_id];
+	if (!l_ptr)
		goto exit;
-	} else {
-		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
-		if (*buf != NULL) {
-			kfree_skb(tunnel_buf);
-			return 1;
-		} else {
-			pr_warn("%soriginal msg dropped\n", link_co_err);
-		}
-	}
+
+	if (msg_type(t_msg) == DUPLICATE_MSG)
+		tipc_link_dup_rcv(l_ptr, t_buf);
+	else if (msg_type(t_msg) == ORIGINAL_MSG)
+		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
+	else
+		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
 exit:
-	*buf = NULL;
-	kfree_skb(tunnel_buf);
-	return 0;
+	kfree_skb(t_buf);
+	return *buf != NULL;
 }
 
 /*
@@ -2277,12 +2313,11 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
	return dsz;
 }
 
-/*
- * tipc_link_recv_fragment(): Called with node lock on. Returns
+/* tipc_link_frag_rcv(): Called with node lock on. Returns
  * the reassembled buffer if message is complete.
  */
-int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
-			    struct sk_buff **fbuf)
+int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
+		       struct sk_buff **fbuf)
 {
	struct sk_buff *frag = *fbuf;
	struct tipc_msg *msg = buf_msg(frag);
@@ -2296,6 +2331,7 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
			goto out_free;
		*head = frag;
		skb_frag_list_init(*head);
+		*fbuf = NULL;
		return 0;
	} else if (*head &&
		   skb_try_coalesce(*head, frag, &headstolen, &delta)) {
@@ -2315,10 +2351,12 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
		*tail = *head = NULL;
		return LINK_REASM_COMPLETE;
	}
+	*fbuf = NULL;
	return 0;
 out_free:
	pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
	kfree_skb(*fbuf);
+	*fbuf = NULL;
	return LINK_REASM_ERROR;
 }
 
@@ -2352,35 +2390,40 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
 }
 
-/**
- * link_find_link - locate link by name
- * @name: ptr to link name string
- * @node: ptr to area to be filled with ptr to associated node
- *
+/* tipc_link_find_owner - locate owner node of link by link's name
+ * @name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
  * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
  * this also prevents link deletion.
  *
- * Returns pointer to link (or 0 if invalid link name).
+ * Returns pointer to node owning the link, or 0 if no matching link is found.
  */
-static struct tipc_link *link_find_link(const char *name,
-					struct tipc_node **node)
+static struct tipc_node *tipc_link_find_owner(const char *link_name,
+					      unsigned int *bearer_id)
 {
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
+	struct tipc_node *tmp_n_ptr;
+	struct tipc_node *found_node = 0;
+
	int i;
 
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	*bearer_id = 0;
+	list_for_each_entry_safe(n_ptr, tmp_n_ptr, &tipc_node_list, list) {
+		spin_lock(&n_ptr->lock);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
-			if (l_ptr && !strcmp(l_ptr->name, name))
-				goto found;
+			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
+				*bearer_id = i;
+				found_node = n_ptr;
+				break;
+			}
		}
+		spin_unlock(&n_ptr->lock);
+		if (found_node)
+			break;
	}
-	l_ptr = NULL;
-	n_ptr = NULL;
-found:
-	*node = n_ptr;
-	return l_ptr;
+	return found_node;
 }
 
 /**
@@ -2422,32 +2465,33 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
+	int bearer_id;
	int res = 0;
 
-	l_ptr = link_find_link(name, &node);
-	if (l_ptr) {
-		/*
-		 * acquire node lock for tipc_link_send_proto_msg().
-		 * see "TIPC locking policy" in net.c.
-		 */
+	node = tipc_link_find_owner(name, &bearer_id);
+	if (node) {
		tipc_node_lock(node);
-		switch (cmd) {
-		case TIPC_CMD_SET_LINK_TOL:
-			link_set_supervision_props(l_ptr, new_value);
-			tipc_link_send_proto_msg(l_ptr,
-				STATE_MSG, 0, 0, new_value, 0, 0);
-			break;
-		case TIPC_CMD_SET_LINK_PRI:
-			l_ptr->priority = new_value;
-			tipc_link_send_proto_msg(l_ptr,
-				STATE_MSG, 0, 0, 0, new_value, 0);
-			break;
-		case TIPC_CMD_SET_LINK_WINDOW:
-			tipc_link_set_queue_limits(l_ptr, new_value);
-			break;
-		default:
-			res = -EINVAL;
-			break;
+		l_ptr = node->links[bearer_id];
+
+		if (l_ptr) {
+			switch (cmd) {
+			case TIPC_CMD_SET_LINK_TOL:
+				link_set_supervision_props(l_ptr, new_value);
+				tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0,
+							 0, new_value, 0, 0);
+				break;
+			case TIPC_CMD_SET_LINK_PRI:
+				l_ptr->priority = new_value;
+				tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0,
+							 0, 0, new_value, 0);
+				break;
+			case TIPC_CMD_SET_LINK_WINDOW:
+				tipc_link_set_queue_limits(l_ptr, new_value);
+				break;
+			default:
+				res = -EINVAL;
+				break;
+			}
		}
		tipc_node_unlock(node);
		return res;
@@ -2542,6 +2586,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;
+	unsigned int bearer_id;
 
	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -2552,15 +2597,19 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}
-
	read_lock_bh(&tipc_net_lock);
-	l_ptr = link_find_link(link_name, &node);
+	node = tipc_link_find_owner(link_name, &bearer_id);
+	if (!node) {
+		read_unlock_bh(&tipc_net_lock);
+		return tipc_cfg_reply_error_string("link not found");
+	}
+	spin_lock(&node->lock);
+	l_ptr = node->links[bearer_id];
	if (!l_ptr) {
+		tipc_node_unlock(node);
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}
-
-	tipc_node_lock(node);
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
@@ -2590,18 +2639,27 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
+	unsigned int bearer_id;
	int ret;
 
	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);
 
	read_lock_bh(&tipc_net_lock);
-	l = link_find_link(name, &node);
-	if (!l) {
+	node = tipc_link_find_owner(name, &bearer_id);
+	if (!node) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);
+
+	l = node->links[bearer_id];
+	if (!l) {
+		tipc_node_unlock(node);
+		read_unlock_bh(&tipc_net_lock);
+		return 0;
+	}
+
	s = &l->stats;
 
	if (tipc_link_is_active(l))
|