@@ -48,9 +48,8 @@
 /*
  * Error message prefixes
  */
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
@@ -85,46 +84,23 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
|
|
|
*/
|
|
|
#define WILDCARD_SESSION 0x10000
|
|
|
|
|
|
-/* State value stored in 'failover_pkts'
|
|
|
+/* Link FSM states:
|
|
|
*/
|
|
|
-#define FIRST_FAILOVER 0xffffu
|
|
|
-
|
|
|
-/* Link FSM states and events:
|
|
|
- */
|
|
|
-enum {
|
|
|
- TIPC_LINK_WORKING,
|
|
|
- TIPC_LINK_PROBING,
|
|
|
- TIPC_LINK_RESETTING,
|
|
|
- TIPC_LINK_ESTABLISHING
|
|
|
-};
|
|
|
-
|
|
|
enum {
|
|
|
- PEER_RESET_EVT = RESET_MSG,
|
|
|
- ACTIVATE_EVT = ACTIVATE_MSG,
|
|
|
- TRAFFIC_EVT, /* Any other valid msg from peer */
|
|
|
- SILENCE_EVT /* Peer was silent during last timer interval*/
|
|
|
+ LINK_ESTABLISHED = 0xe,
|
|
|
+ LINK_ESTABLISHING = 0xe << 4,
|
|
|
+ LINK_RESET = 0x1 << 8,
|
|
|
+ LINK_RESETTING = 0x2 << 12,
|
|
|
+ LINK_PEER_RESET = 0xd << 16,
|
|
|
+ LINK_FAILINGOVER = 0xf << 20,
|
|
|
+ LINK_SYNCHING = 0xc << 24
|
|
|
};
|
|
|
|
|
|
/* Link FSM state checking routines
|
|
|
*/
|
|
|
-static int link_working(struct tipc_link *l)
|
|
|
+static int link_is_up(struct tipc_link *l)
|
|
|
{
|
|
|
- return l->state == TIPC_LINK_WORKING;
|
|
|
-}
|
|
|
-
|
|
|
-static int link_probing(struct tipc_link *l)
|
|
|
-{
|
|
|
- return l->state == TIPC_LINK_PROBING;
|
|
|
-}
|
|
|
-
|
|
|
-static int link_resetting(struct tipc_link *l)
|
|
|
-{
|
|
|
- return l->state == TIPC_LINK_RESETTING;
|
|
|
-}
|
|
|
-
|
|
|
-static int link_establishing(struct tipc_link *l)
|
|
|
-{
|
|
|
- return l->state == TIPC_LINK_ESTABLISHING;
|
|
|
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
|
|
|
}
|
|
|
|
|
|
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
@@ -134,38 +110,34 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
|
|
|
struct sk_buff_head *xmitq);
|
|
|
static void link_reset_statistics(struct tipc_link *l_ptr);
|
|
|
static void link_print(struct tipc_link *l_ptr, const char *str);
|
|
|
-static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
|
|
|
- struct sk_buff_head *xmitq);
|
|
|
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
|
|
|
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
|
|
|
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
|
|
|
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
|
|
|
|
|
|
/*
|
|
|
- * Simple link routines
|
|
|
+ * Simple non-static link routines (i.e. referenced outside this file)
|
|
|
*/
|
|
|
-static unsigned int align(unsigned int i)
|
|
|
+bool tipc_link_is_up(struct tipc_link *l)
|
|
|
{
|
|
|
- return (i + 3) & ~3u;
|
|
|
+ return link_is_up(l);
|
|
|
}
|
|
|
|
|
|
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
|
|
|
+bool tipc_link_is_reset(struct tipc_link *l)
|
|
|
{
|
|
|
- struct tipc_node *n = l->owner;
|
|
|
+ return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
|
|
|
+}
|
|
|
|
|
|
- if (node_active_link(n, 0) != l)
|
|
|
- return node_active_link(n, 0);
|
|
|
- return node_active_link(n, 1);
|
|
|
+bool tipc_link_is_synching(struct tipc_link *l)
|
|
|
+{
|
|
|
+ return l->state == LINK_SYNCHING;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Simple non-static link routines (i.e. referenced outside this file)
|
|
|
- */
|
|
|
-int tipc_link_is_up(struct tipc_link *l_ptr)
|
|
|
+bool tipc_link_is_failingover(struct tipc_link *l)
|
|
|
{
|
|
|
- if (!l_ptr)
|
|
|
- return 0;
|
|
|
- return link_working(l_ptr) || link_probing(l_ptr);
|
|
|
+ return l->state == LINK_FAILINGOVER;
|
|
|
+}
|
|
|
+
|
|
|
+bool tipc_link_is_blocked(struct tipc_link *l)
|
|
|
+{
|
|
|
+ return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
|
|
|
}
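Reviewer note: the new state values each occupy their own bit field, so a link is always in exactly one state, yet a whole set of states can be tested with a single mask, which is exactly what link_is_up(), tipc_link_is_reset() and tipc_link_is_blocked() do above. A minimal standalone sketch of that encoding (the enum values are copied from this patch; the struct and main() are illustrative only, not kernel code):

#include <stdio.h>

enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};

struct link { int state; };

/* mirrors link_is_up(): "up" means established or synching */
static int is_up(struct link *l)
{
	return !!(l->state & (LINK_ESTABLISHED | LINK_SYNCHING));
}

/* mirrors tipc_link_is_blocked(): no protocol traffic may be sent */
static int is_blocked(struct link *l)
{
	return !!(l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER));
}

int main(void)
{
	struct link l = { .state = LINK_SYNCHING };

	printf("up=%d blocked=%d\n", is_up(&l), is_blocked(&l));  /* up=1 blocked=0 */
	l.state = LINK_FAILINGOVER;
	printf("up=%d blocked=%d\n", is_up(&l), is_blocked(&l));  /* up=0 blocked=1 */
	return 0;
}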
|
|
|
|
|
|
int tipc_link_is_active(struct tipc_link *l)
|
|
|
@@ -175,113 +147,71 @@ int tipc_link_is_active(struct tipc_link *l)
|
|
|
return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * tipc_link_create - create a new link
|
|
|
- * @n_ptr: pointer to associated node
|
|
|
- * @b_ptr: pointer to associated bearer
|
|
|
- * @media_addr: media address to use when sending messages over link
|
|
|
- *
|
|
|
- * Returns pointer to link.
|
|
|
- */
|
|
|
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
|
|
|
- struct tipc_bearer *b_ptr,
|
|
|
- const struct tipc_media_addr *media_addr,
|
|
|
- struct sk_buff_head *inputq,
|
|
|
- struct sk_buff_head *namedq)
|
|
|
+static u32 link_own_addr(struct tipc_link *l)
|
|
|
{
|
|
|
- struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
|
|
|
- struct tipc_link *l_ptr;
|
|
|
- struct tipc_msg *msg;
|
|
|
- char *if_name;
|
|
|
- char addr_string[16];
|
|
|
- u32 peer = n_ptr->addr;
|
|
|
-
|
|
|
- if (n_ptr->link_cnt >= MAX_BEARERS) {
|
|
|
- tipc_addr_string_fill(addr_string, n_ptr->addr);
|
|
|
- pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
|
|
|
- n_ptr->link_cnt, addr_string, MAX_BEARERS);
|
|
|
- return NULL;
|
|
|
- }
|
|
|
-
|
|
|
- if (n_ptr->links[b_ptr->identity].link) {
|
|
|
- tipc_addr_string_fill(addr_string, n_ptr->addr);
|
|
|
- pr_err("Attempt to establish second link on <%s> to %s\n",
|
|
|
- b_ptr->name, addr_string);
|
|
|
- return NULL;
|
|
|
- }
|
|
|
-
|
|
|
- l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
|
|
|
- if (!l_ptr) {
|
|
|
- pr_warn("Link creation failed, no memory\n");
|
|
|
- return NULL;
|
|
|
- }
|
|
|
- l_ptr->addr = peer;
|
|
|
- if_name = strchr(b_ptr->name, ':') + 1;
|
|
|
- sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
|
|
|
- tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
|
|
|
- tipc_node(tn->own_addr),
|
|
|
- if_name,
|
|
|
- tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
|
|
|
- /* note: peer i/f name is updated by reset/activate message */
|
|
|
- memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
|
|
|
- l_ptr->owner = n_ptr;
|
|
|
- l_ptr->peer_session = WILDCARD_SESSION;
|
|
|
- l_ptr->bearer_id = b_ptr->identity;
|
|
|
- l_ptr->tolerance = b_ptr->tolerance;
|
|
|
- l_ptr->state = TIPC_LINK_RESETTING;
|
|
|
-
|
|
|
- l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
|
|
|
- msg = l_ptr->pmsg;
|
|
|
- tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
|
|
|
- l_ptr->addr);
|
|
|
- msg_set_size(msg, sizeof(l_ptr->proto_msg));
|
|
|
- msg_set_session(msg, (tn->random & 0xffff));
|
|
|
- msg_set_bearer_id(msg, b_ptr->identity);
|
|
|
- strcpy((char *)msg_data(msg), if_name);
|
|
|
- l_ptr->net_plane = b_ptr->net_plane;
|
|
|
- l_ptr->advertised_mtu = b_ptr->mtu;
|
|
|
- l_ptr->mtu = l_ptr->advertised_mtu;
|
|
|
- l_ptr->priority = b_ptr->priority;
|
|
|
- tipc_link_set_queue_limits(l_ptr, b_ptr->window);
|
|
|
- l_ptr->snd_nxt = 1;
|
|
|
- __skb_queue_head_init(&l_ptr->transmq);
|
|
|
- __skb_queue_head_init(&l_ptr->backlogq);
|
|
|
- __skb_queue_head_init(&l_ptr->deferdq);
|
|
|
- skb_queue_head_init(&l_ptr->wakeupq);
|
|
|
- l_ptr->inputq = inputq;
|
|
|
- l_ptr->namedq = namedq;
|
|
|
- skb_queue_head_init(l_ptr->inputq);
|
|
|
- link_reset_statistics(l_ptr);
|
|
|
- tipc_node_attach_link(n_ptr, l_ptr);
|
|
|
- return l_ptr;
|
|
|
+ return msg_prevnode(l->pmsg);
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * tipc_link_delete - Delete a link
|
|
|
- * @l: link to be deleted
|
|
|
+ * tipc_link_create - create a new link
|
|
|
+ * @n: pointer to associated node
|
|
|
+ * @b: pointer to associated bearer
|
|
|
+ * @ownnode: identity of own node
|
|
|
+ * @peer: identity of peer node
|
|
|
+ * @maddr: media address to be used
|
|
|
+ * @inputq: queue to put messages ready for delivery
|
|
|
+ * @namedq: queue to put binding table update messages ready for delivery
|
|
|
+ * @link: return value, pointer to put the created link
|
|
|
+ *
|
|
|
+ * Returns true if link was created, otherwise false
|
|
|
*/
|
|
|
-void tipc_link_delete(struct tipc_link *l)
|
|
|
-{
|
|
|
- tipc_link_reset(l);
|
|
|
- tipc_link_reset_fragments(l);
|
|
|
- tipc_node_detach_link(l->owner, l);
|
|
|
-}
|
|
|
-
|
|
|
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
|
|
|
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
|
|
|
+ u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
|
|
|
+ struct sk_buff_head *inputq, struct sk_buff_head *namedq,
|
|
|
+ struct tipc_link **link)
|
|
|
{
|
|
|
- struct tipc_net *tn = net_generic(net, tipc_net_id);
|
|
|
- struct tipc_link *link;
|
|
|
- struct tipc_node *node;
|
|
|
+ struct tipc_link *l;
|
|
|
+ struct tipc_msg *hdr;
|
|
|
+ char *if_name;
|
|
|
|
|
|
- rcu_read_lock();
|
|
|
- list_for_each_entry_rcu(node, &tn->node_list, list) {
|
|
|
- tipc_node_lock(node);
|
|
|
- link = node->links[bearer_id].link;
|
|
|
- if (link)
|
|
|
- tipc_link_delete(link);
|
|
|
- tipc_node_unlock(node);
|
|
|
- }
|
|
|
- rcu_read_unlock();
|
|
|
+ l = kzalloc(sizeof(*l), GFP_ATOMIC);
|
|
|
+ if (!l)
|
|
|
+ return false;
|
|
|
+ *link = l;
|
|
|
+
|
|
|
+ /* Note: peer i/f name is completed by reset/activate message */
|
|
|
+ if_name = strchr(b->name, ':') + 1;
|
|
|
+ sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
|
|
|
+ tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
|
|
|
+ if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
|
|
|
+
|
|
|
+ l->addr = peer;
|
|
|
+ l->media_addr = maddr;
|
|
|
+ l->owner = n;
|
|
|
+ l->peer_session = WILDCARD_SESSION;
|
|
|
+ l->bearer_id = b->identity;
|
|
|
+ l->tolerance = b->tolerance;
|
|
|
+ l->net_plane = b->net_plane;
|
|
|
+ l->advertised_mtu = b->mtu;
|
|
|
+ l->mtu = b->mtu;
|
|
|
+ l->priority = b->priority;
|
|
|
+ tipc_link_set_queue_limits(l, b->window);
|
|
|
+ l->inputq = inputq;
|
|
|
+ l->namedq = namedq;
|
|
|
+ l->state = LINK_RESETTING;
|
|
|
+ l->pmsg = (struct tipc_msg *)&l->proto_msg;
|
|
|
+ hdr = l->pmsg;
|
|
|
+ tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
|
|
|
+ msg_set_size(hdr, sizeof(l->proto_msg));
|
|
|
+ msg_set_session(hdr, session);
|
|
|
+ msg_set_bearer_id(hdr, l->bearer_id);
|
|
|
+ strcpy((char *)msg_data(hdr), if_name);
|
|
|
+ __skb_queue_head_init(&l->transmq);
|
|
|
+ __skb_queue_head_init(&l->backlogq);
|
|
|
+ __skb_queue_head_init(&l->deferdq);
|
|
|
+ skb_queue_head_init(&l->wakeupq);
|
|
|
+ skb_queue_head_init(l->inputq);
|
|
|
+ return true;
|
|
|
}
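Reviewer note: the name written into l->name above is "<own zone.cluster.node>:<own interface>-<peer zone.cluster.node>:unknown"; the trailing "unknown" is filled in later from the peer's RESET/ACTIVATE message. A small self-contained sketch of how the three address fields fall out of a 32-bit TIPC address (the 8/12/12-bit zone.cluster.node split mirrors tipc_zone()/tipc_cluster()/tipc_node(); the sample addresses and interface name are made up):

#include <stdio.h>
#include <stdint.h>

/* TIPC node addresses pack zone.cluster.node into 8 + 12 + 12 bits */
static unsigned int addr_zone(uint32_t a)    { return a >> 24; }
static unsigned int addr_cluster(uint32_t a) { return (a >> 12) & 0xfff; }
static unsigned int addr_node(uint32_t a)    { return a & 0xfff; }

int main(void)
{
	uint32_t ownnode = (1u << 24) | (1u << 12) | 1;	/* 1.1.1 */
	uint32_t peer    = (1u << 24) | (1u << 12) | 2;	/* 1.1.2 */
	char name[60];

	/* same format string as tipc_link_create(); peer i/f still unknown */
	snprintf(name, sizeof(name), "%u.%u.%u:%s-%u.%u.%u:unknown",
		 addr_zone(ownnode), addr_cluster(ownnode), addr_node(ownnode),
		 "eth0", addr_zone(peer), addr_cluster(peer), addr_node(peer));
	printf("%s\n", name);	/* 1.1.1:eth0-1.1.2:unknown */
	return 0;
}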
|
|
|
|
|
|
/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
|
|
|
@@ -289,8 +219,8 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
|
|
|
* Give a newly added peer node the sequence number where it should
|
|
|
* start receiving and acking broadcast packets.
|
|
|
*/
|
|
|
-static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
|
|
|
- struct sk_buff_head *xmitq)
|
|
|
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
|
|
|
+ struct sk_buff_head *xmitq)
|
|
|
{
|
|
|
struct sk_buff *skb;
|
|
|
struct sk_buff_head list;
|
|
|
@@ -311,130 +241,159 @@ static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
|
|
|
* tipc_link_fsm_evt - link finite state machine
|
|
|
* @l: pointer to link
|
|
|
* @evt: state machine event to be processed
|
|
|
- * @xmitq: queue to prepend created protocol message, if any
|
|
|
*/
|
|
|
-static int tipc_link_fsm_evt(struct tipc_link *l, int evt,
|
|
|
- struct sk_buff_head *xmitq)
|
|
|
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
|
|
|
{
|
|
|
- int mtyp = 0, rc = 0;
|
|
|
- struct tipc_link *pl;
|
|
|
- enum {
|
|
|
- LINK_RESET = 1,
|
|
|
- LINK_ACTIVATE = (1 << 1),
|
|
|
- SND_PROBE = (1 << 2),
|
|
|
- SND_STATE = (1 << 3),
|
|
|
- SND_RESET = (1 << 4),
|
|
|
- SND_ACTIVATE = (1 << 5),
|
|
|
- SND_BCAST_SYNC = (1 << 6)
|
|
|
- } actions = 0;
|
|
|
-
|
|
|
- if (l->exec_mode == TIPC_LINK_BLOCKED)
|
|
|
- return rc;
|
|
|
+ int rc = 0;
|
|
|
|
|
|
switch (l->state) {
|
|
|
- case TIPC_LINK_WORKING:
|
|
|
+ case LINK_RESETTING:
|
|
|
switch (evt) {
|
|
|
- case TRAFFIC_EVT:
|
|
|
- case ACTIVATE_EVT:
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ l->state = LINK_PEER_RESET;
|
|
|
+ break;
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ l->state = LINK_RESET;
|
|
|
break;
|
|
|
- case SILENCE_EVT:
|
|
|
- l->state = TIPC_LINK_PROBING;
|
|
|
- actions |= SND_PROBE;
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
+ default:
|
|
|
+ goto illegal_evt;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ case LINK_RESET:
|
|
|
+ switch (evt) {
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ l->state = LINK_ESTABLISHING;
|
|
|
break;
|
|
|
- case PEER_RESET_EVT:
|
|
|
- actions |= LINK_RESET | SND_ACTIVATE;
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ l->state = LINK_FAILINGOVER;
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
break;
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
default:
|
|
|
- pr_debug("%s%u WORKING\n", link_unk_evt, evt);
|
|
|
+ goto illegal_evt;
|
|
|
}
|
|
|
break;
|
|
|
- case TIPC_LINK_PROBING:
|
|
|
+ case LINK_PEER_RESET:
|
|
|
switch (evt) {
|
|
|
- case TRAFFIC_EVT:
|
|
|
- case ACTIVATE_EVT:
|
|
|
- l->state = TIPC_LINK_WORKING;
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ l->state = LINK_ESTABLISHING;
|
|
|
break;
|
|
|
- case PEER_RESET_EVT:
|
|
|
- actions |= LINK_RESET | SND_ACTIVATE;
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
break;
|
|
|
- case SILENCE_EVT:
|
|
|
- if (l->silent_intv_cnt <= l->abort_limit) {
|
|
|
- actions |= SND_PROBE;
|
|
|
- break;
|
|
|
- }
|
|
|
- actions |= LINK_RESET | SND_RESET;
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
+ default:
|
|
|
+ goto illegal_evt;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ case LINK_FAILINGOVER:
|
|
|
+ switch (evt) {
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
+ l->state = LINK_RESET;
|
|
|
break;
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
+ break;
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
default:
|
|
|
- pr_err("%s%u PROBING\n", link_unk_evt, evt);
|
|
|
+ goto illegal_evt;
|
|
|
}
|
|
|
break;
|
|
|
- case TIPC_LINK_RESETTING:
|
|
|
+ case LINK_ESTABLISHING:
|
|
|
switch (evt) {
|
|
|
- case TRAFFIC_EVT:
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
+ l->state = LINK_ESTABLISHED;
|
|
|
+ rc |= TIPC_LINK_UP_EVT;
|
|
|
break;
|
|
|
- case ACTIVATE_EVT:
|
|
|
- pl = node_active_link(l->owner, 0);
|
|
|
- if (pl && link_probing(pl))
|
|
|
- break;
|
|
|
- actions |= LINK_ACTIVATE;
|
|
|
- if (!l->owner->working_links)
|
|
|
- actions |= SND_BCAST_SYNC;
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ l->state = LINK_FAILINGOVER;
|
|
|
+ break;
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
+ break;
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
+ default:
|
|
|
+ goto illegal_evt;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ case LINK_ESTABLISHED:
|
|
|
+ switch (evt) {
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ l->state = LINK_PEER_RESET;
|
|
|
+ rc |= TIPC_LINK_DOWN_EVT;
|
|
|
+ break;
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
+ l->state = LINK_RESETTING;
|
|
|
+ rc |= TIPC_LINK_DOWN_EVT;
|
|
|
+ break;
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ l->state = LINK_RESET;
|
|
|
break;
|
|
|
- case PEER_RESET_EVT:
|
|
|
- l->state = TIPC_LINK_ESTABLISHING;
|
|
|
- actions |= SND_ACTIVATE;
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
break;
|
|
|
- case SILENCE_EVT:
|
|
|
- actions |= SND_RESET;
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
+ l->state = LINK_SYNCHING;
|
|
|
break;
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
default:
|
|
|
- pr_err("%s%u in RESETTING\n", link_unk_evt, evt);
|
|
|
+ goto illegal_evt;
|
|
|
}
|
|
|
break;
|
|
|
- case TIPC_LINK_ESTABLISHING:
|
|
|
+ case LINK_SYNCHING:
|
|
|
switch (evt) {
|
|
|
- case TRAFFIC_EVT:
|
|
|
- case ACTIVATE_EVT:
|
|
|
- pl = node_active_link(l->owner, 0);
|
|
|
- if (pl && link_probing(pl))
|
|
|
- break;
|
|
|
- actions |= LINK_ACTIVATE;
|
|
|
- if (!l->owner->working_links)
|
|
|
- actions |= SND_BCAST_SYNC;
|
|
|
+ case LINK_PEER_RESET_EVT:
|
|
|
+ l->state = LINK_PEER_RESET;
|
|
|
+ rc |= TIPC_LINK_DOWN_EVT;
|
|
|
+ break;
|
|
|
+ case LINK_FAILURE_EVT:
|
|
|
+ l->state = LINK_RESETTING;
|
|
|
+ rc |= TIPC_LINK_DOWN_EVT;
|
|
|
break;
|
|
|
- case PEER_RESET_EVT:
|
|
|
+ case LINK_RESET_EVT:
|
|
|
+ l->state = LINK_RESET;
|
|
|
break;
|
|
|
- case SILENCE_EVT:
|
|
|
- actions |= SND_ACTIVATE;
|
|
|
+ case LINK_ESTABLISH_EVT:
|
|
|
+ case LINK_SYNCH_BEGIN_EVT:
|
|
|
break;
|
|
|
+ case LINK_SYNCH_END_EVT:
|
|
|
+ l->state = LINK_ESTABLISHED;
|
|
|
+ break;
|
|
|
+ case LINK_FAILOVER_BEGIN_EVT:
|
|
|
+ case LINK_FAILOVER_END_EVT:
|
|
|
default:
|
|
|
- pr_err("%s%u ESTABLISHING\n", link_unk_evt, evt);
|
|
|
+ goto illegal_evt;
|
|
|
}
|
|
|
break;
|
|
|
default:
|
|
|
- pr_err("Unknown link state %u/%u\n", l->state, evt);
|
|
|
- }
|
|
|
-
|
|
|
- /* Perform actions as decided by FSM */
|
|
|
- if (actions & LINK_RESET) {
|
|
|
- l->exec_mode = TIPC_LINK_BLOCKED;
|
|
|
- rc |= TIPC_LINK_DOWN_EVT;
|
|
|
- }
|
|
|
- if (actions & LINK_ACTIVATE) {
|
|
|
- l->exec_mode = TIPC_LINK_OPEN;
|
|
|
- rc |= TIPC_LINK_UP_EVT;
|
|
|
+ pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
|
|
|
}
|
|
|
- if (actions & (SND_STATE | SND_PROBE))
|
|
|
- mtyp = STATE_MSG;
|
|
|
- if (actions & SND_RESET)
|
|
|
- mtyp = RESET_MSG;
|
|
|
- if (actions & SND_ACTIVATE)
|
|
|
- mtyp = ACTIVATE_MSG;
|
|
|
- if (actions & (SND_PROBE | SND_STATE | SND_RESET | SND_ACTIVATE))
|
|
|
- tipc_link_build_proto_msg(l, mtyp, actions & SND_PROBE,
|
|
|
- 0, 0, 0, xmitq);
|
|
|
- if (actions & SND_BCAST_SYNC)
|
|
|
- tipc_link_build_bcast_sync_msg(l, xmitq);
|
|
|
+ return rc;
|
|
|
+illegal_evt:
|
|
|
+ pr_err("Illegal FSM event %x in state %x on link %s\n",
|
|
|
+ evt, l->state, l->name);
|
|
|
return rc;
|
|
|
}
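Reviewer note: the FSM above no longer builds protocol messages or touches its peer link; it only moves l->state and reports node-level consequences through the TIPC_LINK_UP_EVT/TIPC_LINK_DOWN_EVT bits in rc, leaving all transmission to the caller. A toy table-driven rendering of a few rows of the switch, just to make the legal transitions easy to eyeball (the kernel keeps the switch form; state/event names and the trace in main() are illustrative):

#include <stdio.h>

enum state { S_RESET, S_ESTABLISHING, S_ESTABLISHED, S_RESETTING, S_PEER_RESET, S_NUM };
enum event { EV_PEER_RESET, EV_ESTABLISH, EV_FAILURE, EV_RESET, EV_NUM };

/* next[state][event]; -1 means "ignore the event, stay put".
 * Only three rows are filled in for this sketch. */
static const int next[S_NUM][EV_NUM] = {
	/*                   PEER_RESET       ESTABLISH       FAILURE       RESET   */
	[S_RESET]        = { S_ESTABLISHING,  -1,             -1,           -1      },
	[S_ESTABLISHING] = { -1,              S_ESTABLISHED,  -1,           -1      },
	[S_ESTABLISHED]  = { S_PEER_RESET,    -1,             S_RESETTING,  S_RESET },
};

int main(void)
{
	int s = S_RESET;
	int trace[] = { EV_PEER_RESET, EV_ESTABLISH, EV_FAILURE };

	for (unsigned i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		int n = next[s][trace[i]];

		if (n >= 0)
			s = n;
		printf("event %d -> state %d\n", trace[i], s);
	}
	return 0;
}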
|
|
|
|
|
|
@@ -484,13 +443,45 @@ static void link_profile_stats(struct tipc_link *l)
|
|
|
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
|
|
|
{
|
|
|
int rc = 0;
|
|
|
+ int mtyp = STATE_MSG;
|
|
|
+ bool xmit = false;
|
|
|
+ bool prb = false;
|
|
|
|
|
|
link_profile_stats(l);
|
|
|
- if (l->silent_intv_cnt)
|
|
|
- rc = tipc_link_fsm_evt(l, SILENCE_EVT, xmitq);
|
|
|
- else if (link_working(l) && tipc_bclink_acks_missing(l->owner))
|
|
|
- tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
|
|
|
- l->silent_intv_cnt++;
|
|
|
+
|
|
|
+ switch (l->state) {
|
|
|
+ case LINK_ESTABLISHED:
|
|
|
+ case LINK_SYNCHING:
|
|
|
+ if (!l->silent_intv_cnt) {
|
|
|
+ if (tipc_bclink_acks_missing(l->owner))
|
|
|
+ xmit = true;
|
|
|
+ } else if (l->silent_intv_cnt <= l->abort_limit) {
|
|
|
+ xmit = true;
|
|
|
+ prb = true;
|
|
|
+ } else {
|
|
|
+ rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
|
|
|
+ }
|
|
|
+ l->silent_intv_cnt++;
|
|
|
+ break;
|
|
|
+ case LINK_RESET:
|
|
|
+ xmit = true;
|
|
|
+ mtyp = RESET_MSG;
|
|
|
+ break;
|
|
|
+ case LINK_ESTABLISHING:
|
|
|
+ xmit = true;
|
|
|
+ mtyp = ACTIVATE_MSG;
|
|
|
+ break;
|
|
|
+ case LINK_PEER_RESET:
|
|
|
+ case LINK_RESETTING:
|
|
|
+ case LINK_FAILINGOVER:
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (xmit)
|
|
|
+ tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
|
|
|
+
|
|
|
return rc;
|
|
|
}
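Reviewer note: tipc_link_timeout() now derives its action purely from l->state plus the silence counter: an established link that was silent for one interval starts probing and fails once silent_intv_cnt exceeds abort_limit, a link in LINK_RESET keeps sending RESET_MSG, and one in LINK_ESTABLISHING keeps sending ACTIVATE_MSG. A standalone model of the probe/abort counting only (interval count and limit are made up; just the decision structure follows the function above):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int silent_intv_cnt = 0, abort_limit = 3;
	bool peer_heard_from = false;		/* pretend the peer stays silent */

	for (int tick = 1; tick <= 6; tick++) {
		bool probe = false, failed = false;

		if (peer_heard_from)
			silent_intv_cnt = 0;	/* any received packet clears the counter */

		if (!silent_intv_cnt)
			;			/* quiet but healthy: plain STATE_MSG at most */
		else if (silent_intv_cnt <= abort_limit)
			probe = true;		/* ask the peer to prove it is alive */
		else
			failed = true;		/* too long silent: LINK_FAILURE_EVT */

		silent_intv_cnt++;
		printf("tick %d: probe=%d failed=%d\n", tick, probe, failed);
		if (failed)
			break;
	}
	return 0;
}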
|
|
|
|
|
|
@@ -550,8 +541,6 @@ void link_prepare_wakeup(struct tipc_link *l)
|
|
|
break;
|
|
|
skb_unlink(skb, &l->wakeupq);
|
|
|
skb_queue_tail(l->inputq, skb);
|
|
|
- l->owner->inputq = l->inputq;
|
|
|
- l->owner->action_flags |= TIPC_MSG_EVT;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -587,69 +576,36 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
|
|
|
tipc_link_reset_fragments(l_ptr);
|
|
|
}
|
|
|
|
|
|
-void tipc_link_reset(struct tipc_link *l_ptr)
|
|
|
+void tipc_link_reset(struct tipc_link *l)
|
|
|
{
|
|
|
- u32 prev_state = l_ptr->state;
|
|
|
- int was_active_link = tipc_link_is_active(l_ptr);
|
|
|
- struct tipc_node *owner = l_ptr->owner;
|
|
|
- struct tipc_link *pl = tipc_parallel_link(l_ptr);
|
|
|
-
|
|
|
- msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
|
|
|
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
|
|
|
|
|
|
/* Link is down, accept any session */
|
|
|
- l_ptr->peer_session = WILDCARD_SESSION;
|
|
|
-
|
|
|
- /* Prepare for renewed mtu size negotiation */
|
|
|
- l_ptr->mtu = l_ptr->advertised_mtu;
|
|
|
-
|
|
|
- l_ptr->state = TIPC_LINK_RESETTING;
|
|
|
-
|
|
|
- if ((prev_state == TIPC_LINK_RESETTING) ||
|
|
|
- (prev_state == TIPC_LINK_ESTABLISHING))
|
|
|
- return;
|
|
|
-
|
|
|
- tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
|
|
|
- tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
|
|
|
-
|
|
|
- if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
|
|
|
- l_ptr->exec_mode = TIPC_LINK_BLOCKED;
|
|
|
- l_ptr->failover_checkpt = l_ptr->rcv_nxt;
|
|
|
- pl->failover_pkts = FIRST_FAILOVER;
|
|
|
- pl->failover_checkpt = l_ptr->rcv_nxt;
|
|
|
- pl->failover_skb = l_ptr->reasm_buf;
|
|
|
- } else {
|
|
|
- kfree_skb(l_ptr->reasm_buf);
|
|
|
- }
|
|
|
- /* Clean up all queues, except inputq: */
|
|
|
- __skb_queue_purge(&l_ptr->transmq);
|
|
|
- __skb_queue_purge(&l_ptr->deferdq);
|
|
|
- if (!owner->inputq)
|
|
|
- owner->inputq = l_ptr->inputq;
|
|
|
- skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
|
|
|
- if (!skb_queue_empty(owner->inputq))
|
|
|
- owner->action_flags |= TIPC_MSG_EVT;
|
|
|
- tipc_link_purge_backlog(l_ptr);
|
|
|
- l_ptr->reasm_buf = NULL;
|
|
|
- l_ptr->rcv_unacked = 0;
|
|
|
- l_ptr->snd_nxt = 1;
|
|
|
- l_ptr->rcv_nxt = 1;
|
|
|
- l_ptr->silent_intv_cnt = 0;
|
|
|
- l_ptr->stats.recv_info = 0;
|
|
|
- l_ptr->stale_count = 0;
|
|
|
- link_reset_statistics(l_ptr);
|
|
|
-}
|
|
|
+ l->peer_session = WILDCARD_SESSION;
|
|
|
|
|
|
-void tipc_link_activate(struct tipc_link *link)
|
|
|
-{
|
|
|
- struct tipc_node *node = link->owner;
|
|
|
+ /* If peer is up, it only accepts an incremented session number */
|
|
|
+ msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
|
|
|
|
|
|
- link->rcv_nxt = 1;
|
|
|
- link->stats.recv_info = 1;
|
|
|
- link->silent_intv_cnt = 0;
|
|
|
- link->state = TIPC_LINK_WORKING;
|
|
|
- link->exec_mode = TIPC_LINK_OPEN;
|
|
|
- tipc_node_link_up(node, link->bearer_id);
|
|
|
- tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
|
|
|
+ /* Prepare for renewed mtu size negotiation */
|
|
|
+ l->mtu = l->advertised_mtu;
|
|
|
+
|
|
|
+ /* Clean up all queues: */
|
|
|
+ __skb_queue_purge(&l->transmq);
|
|
|
+ __skb_queue_purge(&l->deferdq);
|
|
|
+ skb_queue_splice_init(&l->wakeupq, l->inputq);
|
|
|
+
|
|
|
+ tipc_link_purge_backlog(l);
|
|
|
+ kfree_skb(l->reasm_buf);
|
|
|
+ kfree_skb(l->failover_reasm_skb);
|
|
|
+ l->reasm_buf = NULL;
|
|
|
+ l->failover_reasm_skb = NULL;
|
|
|
+ l->rcv_unacked = 0;
|
|
|
+ l->snd_nxt = 1;
|
|
|
+ l->rcv_nxt = 1;
|
|
|
+ l->silent_intv_cnt = 0;
|
|
|
+ l->stats.recv_info = 0;
|
|
|
+ l->stale_count = 0;
|
|
|
+ link_reset_statistics(l);
|
|
|
}
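Reviewer note: after this rewrite a reset is purely local: queues are flushed, counters restart, and the session number in the pre-formatted protocol header is bumped so a peer that stayed up rejects anything stale, while the reset side itself falls back to WILDCARD_SESSION and accepts whatever session the peer presents next. A compact sketch of that acceptance rule and of the 16-bit session wrap (the harness is invented; the WILDCARD_SESSION value and its meaning come from this file):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WILDCARD_SESSION 0x10000	/* outside the 16-bit range: matches any session */

static bool session_acceptable(uint32_t expected, uint16_t rcvd)
{
	return expected == WILDCARD_SESSION || expected == rcvd;
}

int main(void)
{
	uint32_t peer_session = WILDCARD_SESSION;	/* link just reset: accept anything */
	uint16_t own_session = 0xfffe;

	printf("%d\n", session_acceptable(peer_session, 42));	/* 1 */

	peer_session = 42;			/* learned from the peer's RESET/ACTIVATE */
	printf("%d\n", session_acceptable(peer_session, 43));	/* 0: stale session */

	own_session++;				/* bumped on every local reset ... */
	own_session++;				/* ... and wraps harmlessly inside 16 bits */
	printf("own session now %u\n", own_session);		/* 0 */
	return 0;
}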
|
|
|
|
|
|
/**
|
|
|
@@ -671,7 +627,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
|
|
|
u16 ack = mod(link->rcv_nxt - 1);
|
|
|
u16 seqno = link->snd_nxt;
|
|
|
u16 bc_last_in = link->owner->bclink.last_in;
|
|
|
- struct tipc_media_addr *addr = &link->media_addr;
|
|
|
+ struct tipc_media_addr *addr = link->media_addr;
|
|
|
struct sk_buff_head *transmq = &link->transmq;
|
|
|
struct sk_buff_head *backlogq = &link->backlogq;
|
|
|
struct sk_buff *skb, *bskb;
|
|
|
@@ -792,20 +748,6 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
|
|
|
-{
|
|
|
- skb_queue_head_init(list);
|
|
|
- __skb_queue_tail(list, skb);
|
|
|
-}
|
|
|
-
|
|
|
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
|
|
|
-{
|
|
|
- struct sk_buff_head head;
|
|
|
-
|
|
|
- skb2list(skb, &head);
|
|
|
- return __tipc_link_xmit(link->owner->net, link, &head);
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* tipc_link_sync_rcv - synchronize broadcast link endpoints.
|
|
|
* Receive the sequence number where we should start receiving and
|
|
|
@@ -851,7 +793,7 @@ void tipc_link_push_packets(struct tipc_link *link)
|
|
|
link->rcv_unacked = 0;
|
|
|
__skb_queue_tail(&link->transmq, skb);
|
|
|
tipc_bearer_send(link->owner->net, link->bearer_id,
|
|
|
- skb, &link->media_addr);
|
|
|
+ skb, link->media_addr);
|
|
|
}
|
|
|
link->snd_nxt = seqno;
|
|
|
}
|
|
|
@@ -884,26 +826,6 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
|
|
|
l->snd_nxt = seqno;
|
|
|
}
|
|
|
|
|
|
-void tipc_link_reset_all(struct tipc_node *node)
|
|
|
-{
|
|
|
- char addr_string[16];
|
|
|
- u32 i;
|
|
|
-
|
|
|
- tipc_node_lock(node);
|
|
|
-
|
|
|
- pr_warn("Resetting all links to %s\n",
|
|
|
- tipc_addr_string_fill(addr_string, node->addr));
|
|
|
-
|
|
|
- for (i = 0; i < MAX_BEARERS; i++) {
|
|
|
- if (node->links[i].link) {
|
|
|
- link_print(node->links[i].link, "Resetting link\n");
|
|
|
- tipc_link_reset(node->links[i].link);
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- tipc_node_unlock(node);
|
|
|
-}
|
|
|
-
|
|
|
static void link_retransmit_failure(struct tipc_link *l_ptr,
|
|
|
struct sk_buff *buf)
|
|
|
{
|
|
|
@@ -920,7 +842,6 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
|
|
|
msg_errcode(msg));
|
|
|
pr_info("sqno %u, prev: %x, src: %x\n",
|
|
|
msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
|
|
|
- tipc_link_reset(l_ptr);
|
|
|
} else {
|
|
|
/* Handle failure on broadcast link */
|
|
|
struct tipc_node *n_ptr;
|
|
|
@@ -975,7 +896,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
|
|
|
msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
|
|
|
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
|
|
tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
|
|
|
- &l_ptr->media_addr);
|
|
|
+ l_ptr->media_addr);
|
|
|
retransmits--;
|
|
|
l_ptr->stats.retransmitted++;
|
|
|
}
|
|
|
@@ -996,7 +917,7 @@ static int tipc_link_retransm(struct tipc_link *l, int retransm,
|
|
|
l->stale_count = 1;
|
|
|
} else if (++l->stale_count > 100) {
|
|
|
link_retransmit_failure(l, skb);
|
|
|
- return TIPC_LINK_DOWN_EVT;
|
|
|
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
|
|
|
}
|
|
|
skb_queue_walk(&l->transmq, skb) {
|
|
|
if (!retransm)
|
|
|
@@ -1016,60 +937,27 @@ static int tipc_link_retransm(struct tipc_link *l, int retransm,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-/* link_synch(): check if all packets arrived before the synch
|
|
|
- * point have been consumed
|
|
|
- * Returns true if the parallel links are synched, otherwise false
|
|
|
- */
|
|
|
-static bool link_synch(struct tipc_link *l)
|
|
|
-{
|
|
|
- unsigned int post_synch;
|
|
|
- struct tipc_link *pl;
|
|
|
-
|
|
|
- pl = tipc_parallel_link(l);
|
|
|
- if (pl == l)
|
|
|
- goto synched;
|
|
|
-
|
|
|
- /* Was last pre-synch packet added to input queue ? */
|
|
|
- if (less_eq(pl->rcv_nxt, l->synch_point))
|
|
|
- return false;
|
|
|
-
|
|
|
- /* Is it still in the input queue ? */
|
|
|
- post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
|
|
|
- if (skb_queue_len(pl->inputq) > post_synch)
|
|
|
- return false;
|
|
|
-synched:
|
|
|
- l->exec_mode = TIPC_LINK_OPEN;
|
|
|
- return true;
|
|
|
-}
|
|
|
-
|
|
|
/* tipc_data_input - deliver data and name distr msgs to upper layer
|
|
|
*
|
|
|
* Consumes buffer if message is of right type
|
|
|
* Node lock must be held
|
|
|
*/
|
|
|
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
|
|
|
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
|
|
|
+ struct sk_buff_head *inputq)
|
|
|
{
|
|
|
struct tipc_node *node = link->owner;
|
|
|
- struct tipc_msg *msg = buf_msg(skb);
|
|
|
- u32 dport = msg_destport(msg);
|
|
|
|
|
|
- switch (msg_user(msg)) {
|
|
|
+ switch (msg_user(buf_msg(skb))) {
|
|
|
case TIPC_LOW_IMPORTANCE:
|
|
|
case TIPC_MEDIUM_IMPORTANCE:
|
|
|
case TIPC_HIGH_IMPORTANCE:
|
|
|
case TIPC_CRITICAL_IMPORTANCE:
|
|
|
case CONN_MANAGER:
|
|
|
- if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
|
|
|
- node->inputq = link->inputq;
|
|
|
- node->action_flags |= TIPC_MSG_EVT;
|
|
|
- }
|
|
|
+ __skb_queue_tail(inputq, skb);
|
|
|
return true;
|
|
|
case NAME_DISTRIBUTOR:
|
|
|
node->bclink.recv_permitted = true;
|
|
|
- node->namedq = link->namedq;
|
|
|
skb_queue_tail(link->namedq, skb);
|
|
|
- if (skb_queue_len(link->namedq) == 1)
|
|
|
- node->action_flags |= TIPC_NAMED_MSG_EVT;
|
|
|
return true;
|
|
|
case MSG_BUNDLER:
|
|
|
case TUNNEL_PROTOCOL:
|
|
|
@@ -1086,51 +974,59 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
|
|
|
/* tipc_link_input - process packet that has passed link protocol check
|
|
|
*
|
|
|
* Consumes buffer
|
|
|
- * Node lock must be held
|
|
|
*/
|
|
|
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
|
|
|
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
|
|
|
+ struct sk_buff_head *inputq)
|
|
|
{
|
|
|
- struct tipc_node *node = link->owner;
|
|
|
- struct tipc_msg *msg = buf_msg(skb);
|
|
|
+ struct tipc_node *node = l->owner;
|
|
|
+ struct tipc_msg *hdr = buf_msg(skb);
|
|
|
+ struct sk_buff **reasm_skb = &l->reasm_buf;
|
|
|
struct sk_buff *iskb;
|
|
|
+ int usr = msg_user(hdr);
|
|
|
+ int rc = 0;
|
|
|
int pos = 0;
|
|
|
+ int ipos = 0;
|
|
|
|
|
|
- switch (msg_user(msg)) {
|
|
|
- case TUNNEL_PROTOCOL:
|
|
|
- if (msg_dup(msg)) {
|
|
|
- link->exec_mode = TIPC_LINK_TUNNEL;
|
|
|
- link->synch_point = msg_seqno(msg_get_wrapped(msg));
|
|
|
- kfree_skb(skb);
|
|
|
- break;
|
|
|
+ if (unlikely(usr == TUNNEL_PROTOCOL)) {
|
|
|
+ if (msg_type(hdr) == SYNCH_MSG) {
|
|
|
+ __skb_queue_purge(&l->deferdq);
|
|
|
+ goto drop;
|
|
|
}
|
|
|
- if (!tipc_link_failover_rcv(link, &skb))
|
|
|
- break;
|
|
|
- if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
|
|
|
- tipc_data_input(link, skb);
|
|
|
- break;
|
|
|
- }
|
|
|
- case MSG_BUNDLER:
|
|
|
- link->stats.recv_bundles++;
|
|
|
- link->stats.recv_bundled += msg_msgcnt(msg);
|
|
|
+ if (!tipc_msg_extract(skb, &iskb, &ipos))
|
|
|
+ return rc;
|
|
|
+ kfree_skb(skb);
|
|
|
+ skb = iskb;
|
|
|
+ hdr = buf_msg(skb);
|
|
|
+ if (less(msg_seqno(hdr), l->drop_point))
|
|
|
+ goto drop;
|
|
|
+ if (tipc_data_input(l, skb, inputq))
|
|
|
+ return rc;
|
|
|
+ usr = msg_user(hdr);
|
|
|
+ reasm_skb = &l->failover_reasm_skb;
|
|
|
+ }
|
|
|
|
|
|
+ if (usr == MSG_BUNDLER) {
|
|
|
+ l->stats.recv_bundles++;
|
|
|
+ l->stats.recv_bundled += msg_msgcnt(hdr);
|
|
|
while (tipc_msg_extract(skb, &iskb, &pos))
|
|
|
- tipc_data_input(link, iskb);
|
|
|
- break;
|
|
|
- case MSG_FRAGMENTER:
|
|
|
- link->stats.recv_fragments++;
|
|
|
- if (tipc_buf_append(&link->reasm_buf, &skb)) {
|
|
|
- link->stats.recv_fragmented++;
|
|
|
- tipc_data_input(link, skb);
|
|
|
- } else if (!link->reasm_buf) {
|
|
|
- tipc_link_reset(link);
|
|
|
+ tipc_data_input(l, iskb, inputq);
|
|
|
+ return 0;
|
|
|
+ } else if (usr == MSG_FRAGMENTER) {
|
|
|
+ l->stats.recv_fragments++;
|
|
|
+ if (tipc_buf_append(reasm_skb, &skb)) {
|
|
|
+ l->stats.recv_fragmented++;
|
|
|
+ tipc_data_input(l, skb, inputq);
|
|
|
+ } else if (!*reasm_skb) {
|
|
|
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
|
|
|
}
|
|
|
- break;
|
|
|
- case BCAST_PROTOCOL:
|
|
|
+ return 0;
|
|
|
+ } else if (usr == BCAST_PROTOCOL) {
|
|
|
tipc_link_sync_rcv(node, skb);
|
|
|
- break;
|
|
|
- default:
|
|
|
- break;
|
|
|
- };
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+drop:
|
|
|
+ kfree_skb(skb);
|
|
|
+ return 0;
|
|
|
}
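Reviewer note: failover reception is now folded into tipc_link_input(): a TUNNEL_PROTOCOL packet of type FAILOVER_MSG is unwrapped with tipc_msg_extract(), and the inner packet is discarded when its sequence number lies below drop_point, i.e. it was already delivered on the link that died. A standalone sketch of that filtering decision, using a wrap-aware comparison that behaves like the kernel's less() helper (the packet numbers are invented):

#include <stdint.h>
#include <stdio.h>

/* a < b in mod-2^16 sequence space, like the kernel's less() */
static int seq_less(uint16_t a, uint16_t b)
{
	return a != b && (uint16_t)(b - a) < 32768;
}

int main(void)
{
	uint16_t drop_point = 100;	/* first seqno NOT yet delivered on the old link */
	uint16_t inner[] = { 97, 99, 100, 101, 65530 };

	for (unsigned i = 0; i < sizeof(inner) / sizeof(inner[0]); i++) {
		if (seq_less(inner[i], drop_point))
			printf("seqno %u: already delivered, drop\n", inner[i]);
		else
			printf("seqno %u: deliver\n", inner[i]);
	}
	return 0;
}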
|
|
|
|
|
|
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
|
|
|
@@ -1157,11 +1053,13 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
struct sk_buff_head *xmitq)
|
|
|
{
|
|
|
struct sk_buff_head *arrvq = &l->deferdq;
|
|
|
- struct sk_buff *tmp;
|
|
|
+ struct sk_buff_head tmpq;
|
|
|
struct tipc_msg *hdr;
|
|
|
u16 seqno, rcv_nxt;
|
|
|
int rc = 0;
|
|
|
|
|
|
+ __skb_queue_head_init(&tmpq);
|
|
|
+
|
|
|
if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
|
|
|
if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
|
|
|
tipc_link_build_proto_msg(l, STATE_MSG, 0,
|
|
|
@@ -1169,21 +1067,21 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
return rc;
|
|
|
}
|
|
|
|
|
|
- skb_queue_walk_safe(arrvq, skb, tmp) {
|
|
|
+ while ((skb = skb_peek(arrvq))) {
|
|
|
hdr = buf_msg(skb);
|
|
|
|
|
|
/* Verify and update link state */
|
|
|
if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
|
|
|
__skb_dequeue(arrvq);
|
|
|
- rc |= tipc_link_proto_rcv(l, skb, xmitq);
|
|
|
+ rc = tipc_link_proto_rcv(l, skb, xmitq);
|
|
|
continue;
|
|
|
}
|
|
|
|
|
|
- if (unlikely(!link_working(l))) {
|
|
|
- rc |= tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
|
|
|
- if (!link_working(l)) {
|
|
|
+ if (unlikely(!link_is_up(l))) {
|
|
|
+ rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
|
|
|
+ if (!link_is_up(l)) {
|
|
|
kfree_skb(__skb_dequeue(arrvq));
|
|
|
- return rc;
|
|
|
+ goto exit;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -1201,7 +1099,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
rcv_nxt = l->rcv_nxt;
|
|
|
if (unlikely(less(rcv_nxt, seqno))) {
|
|
|
l->stats.deferred_recv++;
|
|
|
- return rc;
|
|
|
+ goto exit;
|
|
|
}
|
|
|
|
|
|
__skb_dequeue(arrvq);
|
|
|
@@ -1210,21 +1108,14 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
if (unlikely(more(rcv_nxt, seqno))) {
|
|
|
l->stats.duplicates++;
|
|
|
kfree_skb(skb);
|
|
|
- return rc;
|
|
|
+ goto exit;
|
|
|
}
|
|
|
|
|
|
- /* Synchronize with parallel link if applicable */
|
|
|
- if (unlikely(l->exec_mode == TIPC_LINK_TUNNEL))
|
|
|
- if (!msg_dup(hdr) && !link_synch(l)) {
|
|
|
- kfree_skb(skb);
|
|
|
- return rc;
|
|
|
- }
|
|
|
-
|
|
|
/* Packet can be delivered */
|
|
|
l->rcv_nxt++;
|
|
|
l->stats.recv_info++;
|
|
|
- if (unlikely(!tipc_data_input(l, skb)))
|
|
|
- tipc_link_input(l, skb);
|
|
|
+ if (unlikely(!tipc_data_input(l, skb, &tmpq)))
|
|
|
+ rc = tipc_link_input(l, skb, &tmpq);
|
|
|
|
|
|
/* Ack at regular intervals */
|
|
|
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
|
|
|
@@ -1234,6 +1125,8 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
0, 0, 0, 0, xmitq);
|
|
|
}
|
|
|
}
|
|
|
+exit:
|
|
|
+ tipc_skb_queue_splice_tail(&tmpq, l->inputq);
|
|
|
return rc;
|
|
|
}
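Reviewer note: per packet, tipc_link_rcv() makes exactly one of three decisions against rcv_nxt: a sequence number ahead of it stays on the deferred queue (and may trigger a NACK), one behind it is a duplicate, and a match is delivered and advances rcv_nxt; delivered payloads are collected on the local tmpq and spliced onto l->inputq once at exit. A tiny model of the triage only (plain comparisons are enough for this toy range; the kernel uses wrap-safe 16-bit helpers and real queues):

#include <stdio.h>

int main(void)
{
	unsigned int rcv_nxt = 10;
	unsigned int arrivals[] = { 10, 12, 11, 11, 13 };

	for (unsigned i = 0; i < sizeof(arrivals) / sizeof(arrivals[0]); i++) {
		unsigned int seqno = arrivals[i];

		if (seqno > rcv_nxt) {
			printf("seqno %u: defer (gap towards %u)\n", seqno, rcv_nxt);
		} else if (seqno < rcv_nxt) {
			printf("seqno %u: duplicate, drop\n", seqno);
		} else {
			printf("seqno %u: deliver\n", seqno);
			rcv_nxt++;
		}
	}
	return 0;
}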
|
|
|
|
|
|
@@ -1291,7 +1184,7 @@ void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
|
|
|
skb = __skb_dequeue(&xmitq);
|
|
|
if (!skb)
|
|
|
return;
|
|
|
- tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
|
|
|
+ tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
|
|
|
l->rcv_unacked = 0;
|
|
|
kfree_skb(skb);
|
|
|
}
|
|
|
@@ -1310,7 +1203,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
|
|
|
int node_up = l->owner->bclink.recv_permitted;
|
|
|
|
|
|
/* Don't send protocol message during reset or link failover */
|
|
|
- if (l->exec_mode == TIPC_LINK_BLOCKED)
|
|
|
+ if (tipc_link_is_blocked(l))
|
|
|
return;
|
|
|
|
|
|
msg_set_type(hdr, mtyp);
|
|
|
@@ -1345,7 +1238,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
|
|
|
} else {
|
|
|
/* RESET_MSG or ACTIVATE_MSG */
|
|
|
msg_set_max_pkt(hdr, l->advertised_mtu);
|
|
|
- msg_set_ack(hdr, l->failover_checkpt - 1);
|
|
|
+ msg_set_ack(hdr, l->rcv_nxt - 1);
|
|
|
msg_set_next_sent(hdr, 1);
|
|
|
}
|
|
|
skb = tipc_buf_acquire(msg_size(hdr));
|
|
|
@@ -1353,220 +1246,74 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
|
|
|
return;
|
|
|
skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
|
|
|
skb->priority = TC_PRIO_CONTROL;
|
|
|
- __skb_queue_head(xmitq, skb);
|
|
|
-}
|
|
|
-
|
|
|
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
|
|
|
- * a different bearer. Owner node is locked.
|
|
|
- */
|
|
|
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
|
|
|
- struct tipc_msg *tunnel_hdr,
|
|
|
- struct tipc_msg *msg,
|
|
|
- u32 selector)
|
|
|
-{
|
|
|
- struct tipc_link *tunnel;
|
|
|
- struct sk_buff *skb;
|
|
|
- u32 length = msg_size(msg);
|
|
|
-
|
|
|
- tunnel = node_active_link(l_ptr->owner, selector & 1);
|
|
|
- if (!tipc_link_is_up(tunnel)) {
|
|
|
- pr_warn("%stunnel link no longer available\n", link_co_err);
|
|
|
- return;
|
|
|
- }
|
|
|
- msg_set_size(tunnel_hdr, length + INT_H_SIZE);
|
|
|
- skb = tipc_buf_acquire(length + INT_H_SIZE);
|
|
|
- if (!skb) {
|
|
|
- pr_warn("%sunable to send tunnel msg\n", link_co_err);
|
|
|
- return;
|
|
|
- }
|
|
|
- skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
|
|
|
- skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
|
|
|
- __tipc_link_xmit_skb(tunnel, skb);
|
|
|
+ __skb_queue_tail(xmitq, skb);
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
|
|
|
- * link is still active. We can do failover. Tunnel the failing link's
|
|
|
- * whole send queue via the remaining link. This way, we don't lose
|
|
|
- * any packets, and sequence order is preserved for subsequent traffic
|
|
|
- * sent over the remaining link. Owner node is locked.
|
|
|
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
|
|
|
+ * with contents of the link's transmit and backlog queues.
|
|
|
*/
|
|
|
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
|
|
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
|
|
|
+ int mtyp, struct sk_buff_head *xmitq)
|
|
|
{
|
|
|
- int msgcount;
|
|
|
- struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
|
|
|
- struct tipc_msg tunnel_hdr;
|
|
|
- struct sk_buff *skb;
|
|
|
- int split_bundles;
|
|
|
+ struct sk_buff *skb, *tnlskb;
|
|
|
+ struct tipc_msg *hdr, tnlhdr;
|
|
|
+ struct sk_buff_head *queue = &l->transmq;
|
|
|
+ struct sk_buff_head tmpxq, tnlq;
|
|
|
+ u16 pktlen, pktcnt, seqno = l->snd_nxt;
|
|
|
|
|
|
- if (!tunnel)
|
|
|
+ if (!tnl)
|
|
|
return;
|
|
|
|
|
|
- tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
|
|
|
- FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
|
|
|
+ skb_queue_head_init(&tnlq);
|
|
|
+ skb_queue_head_init(&tmpxq);
|
|
|
|
|
|
- skb_queue_walk(&l_ptr->backlogq, skb) {
|
|
|
- msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
|
|
|
- l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
|
|
|
- }
|
|
|
- skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
|
|
|
- tipc_link_purge_backlog(l_ptr);
|
|
|
- msgcount = skb_queue_len(&l_ptr->transmq);
|
|
|
- msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
|
|
|
- msg_set_msgcnt(&tunnel_hdr, msgcount);
|
|
|
-
|
|
|
- if (skb_queue_empty(&l_ptr->transmq)) {
|
|
|
- skb = tipc_buf_acquire(INT_H_SIZE);
|
|
|
- if (skb) {
|
|
|
- skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
|
|
|
- msg_set_size(&tunnel_hdr, INT_H_SIZE);
|
|
|
- __tipc_link_xmit_skb(tunnel, skb);
|
|
|
- } else {
|
|
|
- pr_warn("%sunable to send changeover msg\n",
|
|
|
- link_co_err);
|
|
|
- }
|
|
|
+ /* At least one packet required for safe algorithm => add dummy */
|
|
|
+ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
|
|
|
+ BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
|
|
|
+ 0, 0, TIPC_ERR_NO_PORT);
|
|
|
+ if (!skb) {
|
|
|
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
|
|
|
return;
|
|
|
}
|
|
|
-
|
|
|
- split_bundles = (node_active_link(l_ptr->owner, 0) !=
|
|
|
- node_active_link(l_ptr->owner, 0));
|
|
|
-
|
|
|
- skb_queue_walk(&l_ptr->transmq, skb) {
|
|
|
- struct tipc_msg *msg = buf_msg(skb);
|
|
|
-
|
|
|
- if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
|
|
|
- struct tipc_msg *m = msg_get_wrapped(msg);
|
|
|
- unchar *pos = (unchar *)m;
|
|
|
-
|
|
|
- msgcount = msg_msgcnt(msg);
|
|
|
- while (msgcount--) {
|
|
|
- msg_set_seqno(m, msg_seqno(msg));
|
|
|
- tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
|
|
|
- msg_link_selector(m));
|
|
|
- pos += align(msg_size(m));
|
|
|
- m = (struct tipc_msg *)pos;
|
|
|
- }
|
|
|
- } else {
|
|
|
- tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
|
|
|
- msg_link_selector(msg));
|
|
|
- }
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
|
|
|
- * duplicate of the first link's send queue via the new link. This way, we
|
|
|
- * are guaranteed that currently queued packets from a socket are delivered
|
|
|
- * before future traffic from the same socket, even if this is using the
|
|
|
- * new link. The last arriving copy of each duplicate packet is dropped at
|
|
|
- * the receiving end by the regular protocol check, so packet cardinality
|
|
|
- * and sequence order is preserved per sender/receiver socket pair.
|
|
|
- * Owner node is locked.
|
|
|
- */
|
|
|
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
|
|
|
- struct tipc_link *tnl)
|
|
|
-{
|
|
|
- struct sk_buff *skb;
|
|
|
- struct tipc_msg tnl_hdr;
|
|
|
- struct sk_buff_head *queue = &link->transmq;
|
|
|
- int mcnt;
|
|
|
- u16 seqno;
|
|
|
-
|
|
|
- tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
|
|
|
- SYNCH_MSG, INT_H_SIZE, link->addr);
|
|
|
- mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
|
|
|
- msg_set_msgcnt(&tnl_hdr, mcnt);
|
|
|
- msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
|
|
|
-
|
|
|
-tunnel_queue:
|
|
|
+ skb_queue_tail(&tnlq, skb);
|
|
|
+ tipc_link_xmit(l, &tnlq, &tmpxq);
|
|
|
+ __skb_queue_purge(&tmpxq);
|
|
|
+
|
|
|
+ /* Initialize reusable tunnel packet header */
|
|
|
+ tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
|
|
|
+ mtyp, INT_H_SIZE, l->addr);
|
|
|
+ pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
|
|
|
+ msg_set_msgcnt(&tnlhdr, pktcnt);
|
|
|
+ msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
|
|
|
+tnl:
|
|
|
+ /* Wrap each packet into a tunnel packet */
|
|
|
skb_queue_walk(queue, skb) {
|
|
|
- struct sk_buff *outskb;
|
|
|
- struct tipc_msg *msg = buf_msg(skb);
|
|
|
- u32 len = msg_size(msg);
|
|
|
-
|
|
|
- msg_set_ack(msg, mod(link->rcv_nxt - 1));
|
|
|
- msg_set_bcast_ack(msg, link->owner->bclink.last_in);
|
|
|
- msg_set_size(&tnl_hdr, len + INT_H_SIZE);
|
|
|
- outskb = tipc_buf_acquire(len + INT_H_SIZE);
|
|
|
- if (outskb == NULL) {
|
|
|
- pr_warn("%sunable to send duplicate msg\n",
|
|
|
- link_co_err);
|
|
|
+ hdr = buf_msg(skb);
|
|
|
+ if (queue == &l->backlogq)
|
|
|
+ msg_set_seqno(hdr, seqno++);
|
|
|
+ pktlen = msg_size(hdr);
|
|
|
+ msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
|
|
|
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
|
|
|
+ if (!tnlskb) {
|
|
|
+ pr_warn("%sunable to send packet\n", link_co_err);
|
|
|
return;
|
|
|
}
|
|
|
- skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
|
|
|
- skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
|
|
|
- skb->data, len);
|
|
|
- __tipc_link_xmit_skb(tnl, outskb);
|
|
|
- if (!tipc_link_is_up(link))
|
|
|
- return;
|
|
|
- }
|
|
|
- if (queue == &link->backlogq)
|
|
|
- return;
|
|
|
- seqno = link->snd_nxt;
|
|
|
- skb_queue_walk(&link->backlogq, skb) {
|
|
|
- msg_set_seqno(buf_msg(skb), seqno);
|
|
|
- seqno = mod(seqno + 1);
|
|
|
+ skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
|
|
|
+ skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
|
|
|
+ __skb_queue_tail(&tnlq, tnlskb);
|
|
|
}
|
|
|
- queue = &link->backlogq;
|
|
|
- goto tunnel_queue;
|
|
|
-}
|
|
|
-
|
|
|
-/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
|
|
|
- * Owner node is locked.
|
|
|
- */
|
|
|
-static bool tipc_link_failover_rcv(struct tipc_link *link,
|
|
|
- struct sk_buff **skb)
|
|
|
-{
|
|
|
- struct tipc_msg *msg = buf_msg(*skb);
|
|
|
- struct sk_buff *iskb = NULL;
|
|
|
- struct tipc_link *pl = NULL;
|
|
|
- int bearer_id = msg_bearer_id(msg);
|
|
|
- int pos = 0;
|
|
|
-
|
|
|
- if (msg_type(msg) != FAILOVER_MSG) {
|
|
|
- pr_warn("%sunknown tunnel pkt received\n", link_co_err);
|
|
|
- goto exit;
|
|
|
+ if (queue != &l->backlogq) {
|
|
|
+ queue = &l->backlogq;
|
|
|
+ goto tnl;
|
|
|
}
|
|
|
- if (bearer_id >= MAX_BEARERS)
|
|
|
- goto exit;
|
|
|
-
|
|
|
- if (bearer_id == link->bearer_id)
|
|
|
- goto exit;
|
|
|
-
|
|
|
- pl = link->owner->links[bearer_id].link;
|
|
|
- if (pl && tipc_link_is_up(pl))
|
|
|
- tipc_link_reset(pl);
|
|
|
-
|
|
|
- if (link->failover_pkts == FIRST_FAILOVER)
|
|
|
- link->failover_pkts = msg_msgcnt(msg);
|
|
|
|
|
|
- /* Should we expect an inner packet? */
|
|
|
- if (!link->failover_pkts)
|
|
|
- goto exit;
|
|
|
-
|
|
|
- if (!tipc_msg_extract(*skb, &iskb, &pos)) {
|
|
|
- pr_warn("%sno inner failover pkt\n", link_co_err);
|
|
|
- *skb = NULL;
|
|
|
- goto exit;
|
|
|
- }
|
|
|
- link->failover_pkts--;
|
|
|
- *skb = NULL;
|
|
|
+ tipc_link_xmit(tnl, &tnlq, xmitq);
|
|
|
|
|
|
- /* Was this packet already delivered? */
|
|
|
- if (less(buf_seqno(iskb), link->failover_checkpt)) {
|
|
|
- kfree_skb(iskb);
|
|
|
- iskb = NULL;
|
|
|
- goto exit;
|
|
|
- }
|
|
|
- if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
|
|
|
- link->stats.recv_fragments++;
|
|
|
- tipc_buf_append(&link->failover_skb, &iskb);
|
|
|
+ if (mtyp == FAILOVER_MSG) {
|
|
|
+ tnl->drop_point = l->rcv_nxt;
|
|
|
+ tnl->failover_reasm_skb = l->reasm_buf;
|
|
|
+ l->reasm_buf = NULL;
|
|
|
}
|
|
|
-exit:
|
|
|
- if (!link->failover_pkts && pl)
|
|
|
- pl->exec_mode = TIPC_LINK_OPEN;
|
|
|
- kfree_skb(*skb);
|
|
|
- *skb = iskb;
|
|
|
- return *skb;
|
|
|
}
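Reviewer note: tipc_link_tnl_prepare() wraps every packet waiting on the transmit and backlog queues (plus one dummy, so the receiver always sees at least one tunnel packet) whole into a TUNNEL_PROTOCOL packet: the tunnel header is prepended and the original packet becomes the payload, with msgcnt telling the receiver how many wrapped packets to expect. A standalone sketch of just the size bookkeeping (INT_H_SIZE is assumed to be 40 bytes here; the packet sizes are invented):

#include <stdio.h>

#define INT_H_SIZE 40	/* TIPC internal header size, assumed 40 bytes */

int main(void)
{
	/* inner packet sizes: dummy + what was queued on transmq/backlogq */
	int pktlen[] = { 24, 1476, 300, 24 };
	int pktcnt = sizeof(pktlen) / sizeof(pktlen[0]);
	int total = 0;

	for (int i = 0; i < pktcnt; i++) {
		int tnl_size = pktlen[i] + INT_H_SIZE;	/* inner packet rides as payload */

		printf("tunnel pkt %d: %d bytes (inner %d)\n", i, tnl_size, pktlen[i]);
		total += tnl_size;
	}
	printf("msgcnt=%d, %d bytes tunnelled\n", pktcnt, total);
	return 0;
}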
|
|
|
|
|
|
/* tipc_link_proto_rcv(): receive link level protocol message :
|
|
|
@@ -1586,7 +1333,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
char *if_name;
|
|
|
int rc = 0;
|
|
|
|
|
|
- if (l->exec_mode == TIPC_LINK_BLOCKED)
|
|
|
+ if (tipc_link_is_blocked(l))
|
|
|
goto exit;
|
|
|
|
|
|
if (link_own_addr(l) > msg_prevnode(hdr))
|
|
|
@@ -1600,6 +1347,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
(l->peer_session != WILDCARD_SESSION))
|
|
|
break;
|
|
|
/* fall thru' */
|
|
|
+
|
|
|
case ACTIVATE_MSG:
|
|
|
|
|
|
/* Complete own link name with peer's interface name */
|
|
|
@@ -1618,13 +1366,20 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
|
|
|
l->priority = peers_prio;
|
|
|
|
|
|
+ if (msg_type(hdr) == RESET_MSG) {
|
|
|
+ rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
|
|
|
+ } else if (!link_is_up(l)) {
|
|
|
+ tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
|
|
|
+ rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
|
|
|
+ }
|
|
|
l->peer_session = msg_session(hdr);
|
|
|
l->peer_bearer_id = msg_bearer_id(hdr);
|
|
|
- rc = tipc_link_fsm_evt(l, msg_type(hdr), xmitq);
|
|
|
if (l->mtu > msg_max_pkt(hdr))
|
|
|
l->mtu = msg_max_pkt(hdr);
|
|
|
break;
|
|
|
+
|
|
|
case STATE_MSG:
|
|
|
+
|
|
|
/* Update own tolerance if peer indicates a non-zero value */
|
|
|
if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
|
|
|
l->tolerance = peers_tol;
|
|
|
@@ -1633,11 +1388,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
l->stats.recv_states++;
|
|
|
if (msg_probe(hdr))
|
|
|
l->stats.recv_probes++;
|
|
|
- rc = tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
|
|
|
- if (!tipc_link_is_up(l))
|
|
|
+ rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
|
|
|
+ if (!link_is_up(l))
|
|
|
break;
|
|
|
|
|
|
- /* Has peer sent packets we haven't received yet ? */
|
|
|
+ /* Send NACK if peer has sent pkts we haven't received yet */
|
|
|
if (more(peers_snd_nxt, l->rcv_nxt))
|
|
|
rcvgap = peers_snd_nxt - l->rcv_nxt;
|
|
|
if (rcvgap || (msg_probe(hdr)))
|
|
|
@@ -1647,9 +1402,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
|
|
|
|
|
/* If NACK, retransmit will now start at right position */
|
|
|
if (nacked_gap) {
|
|
|
- rc |= tipc_link_retransm(l, nacked_gap, xmitq);
|
|
|
+ rc = tipc_link_retransm(l, nacked_gap, xmitq);
|
|
|
l->stats.recv_nacks++;
|
|
|
}
|
|
|
+
|
|
|
tipc_link_advance_backlog(l, xmitq);
|
|
|
if (unlikely(!skb_queue_empty(&l->wakeupq)))
|
|
|
link_prepare_wakeup(l);
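Reviewer note: protocol reception now translates peer messages into FSM events instead of manipulating link state directly: a RESET_MSG always feeds LINK_PEER_RESET_EVT, an ACTIVATE_MSG on a link that is not yet up feeds LINK_PEER_RESET_EVT followed by LINK_ESTABLISH_EVT, and a STATE_MSG feeds LINK_ESTABLISH_EVT, which either completes establishment or is ignored. A minimal sketch of that translation only (message-type names follow the file; the print statements stand in for the real tipc_link_fsm_evt() calls):

#include <stdbool.h>
#include <stdio.h>

enum { RESET_MSG, ACTIVATE_MSG, STATE_MSG };	/* local stand-ins for the real type codes */

static void proto_rcv(int mtyp, bool link_up)
{
	switch (mtyp) {
	case RESET_MSG:
		printf("RESET    -> LINK_PEER_RESET_EVT\n");
		break;
	case ACTIVATE_MSG:
		if (!link_up)
			printf("ACTIVATE -> LINK_PEER_RESET_EVT + LINK_ESTABLISH_EVT\n");
		break;
	case STATE_MSG:
		printf("STATE    -> LINK_ESTABLISH_EVT\n");
		break;
	}
}

int main(void)
{
	proto_rcv(RESET_MSG, true);
	proto_rcv(ACTIVATE_MSG, false);
	proto_rcv(STATE_MSG, true);
	return 0;
}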
|
|
|
@@ -1726,19 +1482,7 @@ static void link_print(struct tipc_link *l, const char *str)
|
|
|
u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
|
|
|
u16 tail = l->snd_nxt - 1;
|
|
|
|
|
|
- pr_info("%s Link <%s>:", str, l->name);
|
|
|
-
|
|
|
- if (link_probing(l))
|
|
|
- pr_cont(":P\n");
|
|
|
- else if (link_establishing(l))
|
|
|
- pr_cont(":E\n");
|
|
|
- else if (link_resetting(l))
|
|
|
- pr_cont(":R\n");
|
|
|
- else if (link_working(l))
|
|
|
- pr_cont(":W\n");
|
|
|
- else
|
|
|
- pr_cont("\n");
|
|
|
-
|
|
|
+ pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
|
|
|
pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
|
|
|
skb_queue_len(&l->transmq), head, tail,
|
|
|
skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
|