Merge branch 'tipc-next'
Jon Maloy says:

====================
tipc: some link layer improvements

We continue eliminating redundant complexity at the link layer, and add
a couple of improvements to the packet sending functionality.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b55b10bebb
@@ -38,13 +38,6 @@
 #include "addr.h"
 #include "core.h"
 
-u32 tipc_own_addr(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	return tn->own_addr;
-}
-
 /**
  * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
  */
@@ -41,10 +41,18 @@
 #include <linux/tipc.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include "core.h"
 
 #define TIPC_ZONE_MASK 0xff000000u
 #define TIPC_CLUSTER_MASK 0xfffff000u
 
+static inline u32 tipc_own_addr(struct net *net)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+	return tn->own_addr;
+}
+
 static inline u32 tipc_zone_mask(u32 addr)
 {
 	return addr & TIPC_ZONE_MASK;
@@ -115,19 +115,15 @@ static void bclink_set_last_sent(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_link *bcl = tn->bcl;
-	struct sk_buff *skb = skb_peek(&bcl->backlogq);
 
-	if (skb)
-		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
-	else
-		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
 }
 
 u32 tipc_bclink_get_last_sent(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-	return tn->bcl->fsm_msg_cnt;
+	return tn->bcl->silent_intv_cnt;
 }
 
 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -212,16 +208,16 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 		 * or both sent and unsent messages (otherwise)
 		 */
 		if (tn->bclink->bcast_nodes.count)
-			acked = tn->bcl->fsm_msg_cnt;
+			acked = tn->bcl->silent_intv_cnt;
 		else
-			acked = tn->bcl->next_out_no;
+			acked = tn->bcl->snd_nxt;
 	} else {
 		/*
 		 * Bail out if specified sequence number does not correspond
 		 * to a message that has been sent and not yet acknowledged
 		 */
 		if (less(acked, buf_seqno(skb)) ||
-		    less(tn->bcl->fsm_msg_cnt, acked) ||
+		    less(tn->bcl->silent_intv_cnt, acked) ||
 		    less_eq(acked, n_ptr->bclink.acked))
 			goto exit;
 	}
@@ -803,9 +799,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 		goto attr_msg_full;
 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
 		goto attr_msg_full;
 
 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
@@ -914,7 +910,7 @@ int tipc_bclink_init(struct net *net)
 	__skb_queue_head_init(&bcl->backlogq);
 	__skb_queue_head_init(&bcl->deferdq);
 	skb_queue_head_init(&bcl->wakeupq);
-	bcl->next_out_no = 1;
+	bcl->snd_nxt = 1;
 	spin_lock_init(&bclink->node.lock);
 	__skb_queue_head_init(&bclink->arrvq);
 	skb_queue_head_init(&bclink->inputq);
@@ -71,8 +71,7 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
 	[TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
 };
 
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
-			   bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -324,7 +323,7 @@ restart:
 
 	res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
 	if (res) {
-		bearer_disable(net, b_ptr, false);
+		bearer_disable(net, b_ptr);
 		pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
 			name);
 		return -EINVAL;
@@ -344,7 +343,7 @@ restart:
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
 	pr_info("Resetting bearer <%s>\n", b_ptr->name);
-	tipc_link_reset_list(net, b_ptr->identity);
+	tipc_link_delete_list(net, b_ptr->identity);
 	tipc_disc_reset(net, b_ptr);
 	return 0;
 }
@@ -354,8 +353,7 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
  *
  * Note: This routine assumes caller holds RTNL lock.
  */
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
-			   bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	u32 i;
@@ -363,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
 	pr_info("Disabling bearer <%s>\n", b_ptr->name);
 	b_ptr->media->disable_media(b_ptr);
 
-	tipc_link_delete_list(net, b_ptr->identity, shutting_down);
+	tipc_link_delete_list(net, b_ptr->identity);
 	if (b_ptr->link_req)
 		tipc_disc_delete(b_ptr->link_req);
 
@@ -541,7 +539,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 		break;
 	case NETDEV_UNREGISTER:
 	case NETDEV_CHANGENAME:
-		bearer_disable(dev_net(dev), b_ptr, false);
+		bearer_disable(dev_net(dev), b_ptr);
 		break;
 	}
 	return NOTIFY_OK;
@@ -583,7 +581,7 @@ void tipc_bearer_stop(struct net *net)
 	for (i = 0; i < MAX_BEARERS; i++) {
 		b_ptr = rtnl_dereference(tn->bearer_list[i]);
 		if (b_ptr) {
-			bearer_disable(net, b_ptr, true);
+			bearer_disable(net, b_ptr);
 			tn->bearer_list[i] = NULL;
 		}
 	}
@@ -747,7 +745,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 		return -EINVAL;
 	}
 
-	bearer_disable(net, bearer, false);
+	bearer_disable(net, bearer);
 	rtnl_unlock();
 
 	return 0;
@@ -38,9 +38,9 @@
 #define _TIPC_BEARER_H
 
 #include "netlink.h"
+#include "core.h"
 #include <net/genetlink.h>
 
-#define MAX_BEARERS 2
 #define MAX_MEDIA 3
 #define MAX_NODES 4096
 #define WSIZE 32
@@ -60,16 +60,19 @@
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
 
-#include "node.h"
-#include "bearer.h"
-#include "bcast.h"
-#include "netlink.h"
-#include "link.h"
-#include "node.h"
-#include "msg.h"
+struct tipc_node;
+struct tipc_bearer;
+struct tipc_bcbearer;
+struct tipc_bclink;
+struct tipc_link;
+struct tipc_name_table;
+struct tipc_server;
 
 #define TIPC_MOD_VER "2.0.0"
 
+#define NODE_HTABLE_SIZE 512
+#define MAX_BEARERS 3
+
 extern int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;
@@ -106,6 +109,26 @@ struct tipc_net {
 	atomic_t subscription_count;
 };
 
+static inline u16 mod(u16 x)
+{
+	return x & 0xffffu;
+}
+
+static inline int less_eq(u16 left, u16 right)
+{
+	return mod(right - left) < 32768u;
+}
+
+static inline int more(u16 left, u16 right)
+{
+	return !less_eq(left, right);
+}
+
+static inline int less(u16 left, u16 right)
+{
+	return less_eq(left, right) && (mod(right) != mod(left));
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
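The sequence-number helpers added to core.h above operate on a 16-bit space, so "newer than" is defined modulo 2^16 with a half-range window rather than by plain integer comparison. As an illustration only (a userspace harness, not part of the patch), the sketch below copies the four helpers with standard C types and checks their behaviour across the 65535 -> 0 wrap:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copies of the helpers added to net/tipc/core.h, using standard types
 * so they can be exercised outside the kernel.
 */
static inline uint16_t mod(uint16_t x)
{
	return x & 0xffffu;
}

static inline int less_eq(uint16_t left, uint16_t right)
{
	return mod(right - left) < 32768u;
}

static inline int more(uint16_t left, uint16_t right)
{
	return !less_eq(left, right);
}

static inline int less(uint16_t left, uint16_t right)
{
	return less_eq(left, right) && (mod(right) != mod(left));
}

int main(void)
{
	/* Ordinary case: 100 was sent before 101. */
	assert(less(100, 101));

	/* Wrap-around: 65535 is immediately followed by 0, so 0 still
	 * counts as "later" even though it is numerically smaller.
	 */
	assert(less(65535, 0));
	assert(more(2, 65530));

	/* Equal sequence numbers are neither less nor more. */
	assert(!less(42, 42) && !more(42, 42));

	printf("16-bit sequence number comparisons behave as expected\n");
	return 0;
}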
net/tipc/link.c
@@ -86,7 +86,7 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
  */
 #define STARTING_EVT 856384768 /* link processing trigger */
 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
-#define TIMEOUT_EVT 560817u /* link timer expired */
+#define SILENCE_EVT 560817u /* timer dicovered silence from peer */
 
 /*
  * State value stored in 'failover_pkts'
@@ -106,6 +106,7 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
+static void link_set_timer(struct tipc_link *link, unsigned long time);
 /*
  * Simple link routines
  */
@@ -197,11 +198,12 @@ static void link_timeout(unsigned long data)
 	}
 
-	/* do all other link processing performed on a periodic basis */
-	link_state_event(l_ptr, TIMEOUT_EVT);
-
+	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
+		link_state_event(l_ptr, SILENCE_EVT);
+	l_ptr->silent_intv_cnt++;
 	if (skb_queue_len(&l_ptr->backlogq))
 		tipc_link_push_packets(l_ptr);
 
+	link_set_timer(l_ptr, l_ptr->keepalive_intv);
 	tipc_node_unlock(l_ptr->owner);
 	tipc_link_put(l_ptr);
 }
@@ -233,8 +235,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
 	if (n_ptr->link_cnt >= MAX_BEARERS) {
 		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
-		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
+		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
+		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
 		return NULL;
 	}
 
@@ -261,7 +263,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	/* note: peer i/f name is updated by reset/activate message */
 	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
 	l_ptr->owner = n_ptr;
-	l_ptr->checkpoint = 1;
 	l_ptr->peer_session = INVALID_SESSION;
 	l_ptr->bearer_id = b_ptr->identity;
 	link_set_supervision_props(l_ptr, b_ptr->tolerance);
@@ -280,7 +281,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	l_ptr->mtu = l_ptr->advertised_mtu;
 	l_ptr->priority = b_ptr->priority;
 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-	l_ptr->next_out_no = 1;
+	l_ptr->snd_nxt = 1;
 	__skb_queue_head_init(&l_ptr->transmq);
 	__skb_queue_head_init(&l_ptr->backlogq);
 	__skb_queue_head_init(&l_ptr->deferdq);
@@ -311,8 +312,7 @@ void tipc_link_delete(struct tipc_link *l)
 	tipc_link_put(l);
 }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
-			   bool shutting_down)
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_link *link;
@@ -451,9 +451,9 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 
 	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
 		l_ptr->flags |= LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = l_ptr->next_in_no;
+		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
 		pl->failover_pkts = FIRST_FAILOVER;
-		pl->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_checkpt = l_ptr->rcv_nxt;
 		pl->failover_skb = l_ptr->reasm_buf;
 	} else {
 		kfree_skb(l_ptr->reasm_buf);
@@ -469,36 +469,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 	tipc_link_purge_backlog(l_ptr);
 	l_ptr->reasm_buf = NULL;
 	l_ptr->rcv_unacked = 0;
-	l_ptr->checkpoint = 1;
-	l_ptr->next_out_no = 1;
-	l_ptr->fsm_msg_cnt = 0;
+	l_ptr->snd_nxt = 1;
+	l_ptr->silent_intv_cnt = 0;
 	l_ptr->stale_count = 0;
 	link_reset_statistics(l_ptr);
 }
 
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *l_ptr;
-	struct tipc_node *n_ptr;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-		tipc_node_lock(n_ptr);
-		l_ptr = n_ptr->links[bearer_id];
-		if (l_ptr)
-			tipc_link_reset(l_ptr);
-		tipc_node_unlock(n_ptr);
-	}
-	rcu_read_unlock();
-}
-
 static void link_activate(struct tipc_link *link)
 {
 	struct tipc_node *node = link->owner;
 
-	link->next_in_no = 1;
+	link->rcv_nxt = 1;
 	link->stats.recv_info = 1;
+	link->silent_intv_cnt = 0;
 	tipc_node_link_up(node, link);
 	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
 }
@ -511,7 +494,7 @@ static void link_activate(struct tipc_link *link)
|
|||
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
|
||||
{
|
||||
struct tipc_link *other;
|
||||
unsigned long cont_intv = l_ptr->cont_intv;
|
||||
unsigned long timer_intv = l_ptr->keepalive_intv;
|
||||
|
||||
if (l_ptr->flags & LINK_STOPPED)
|
||||
return;
|
||||
|
@ -519,45 +502,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
|
|||
if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
|
||||
return; /* Not yet. */
|
||||
|
||||
if (l_ptr->flags & LINK_FAILINGOVER) {
|
||||
if (event == TIMEOUT_EVT)
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
if (l_ptr->flags & LINK_FAILINGOVER)
|
||||
return;
|
||||
}
|
||||
|
||||
switch (l_ptr->state) {
|
||||
case WORKING_WORKING:
|
||||
switch (event) {
|
||||
case TRAFFIC_MSG_EVT:
|
||||
case ACTIVATE_MSG:
|
||||
l_ptr->silent_intv_cnt = 0;
|
||||
break;
|
||||
case TIMEOUT_EVT:
|
||||
if (l_ptr->next_in_no != l_ptr->checkpoint) {
|
||||
l_ptr->checkpoint = l_ptr->next_in_no;
|
||||
if (tipc_bclink_acks_missing(l_ptr->owner)) {
|
||||
case SILENCE_EVT:
|
||||
if (!l_ptr->silent_intv_cnt) {
|
||||
if (tipc_bclink_acks_missing(l_ptr->owner))
|
||||
tipc_link_proto_xmit(l_ptr, STATE_MSG,
|
||||
0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
}
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
}
|
||||
l_ptr->state = WORKING_UNKNOWN;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv / 4);
|
||||
break;
|
||||
case RESET_MSG:
|
||||
pr_debug("%s<%s>, requested by peer\n",
|
||||
link_rst_msg, l_ptr->name);
|
||||
tipc_link_reset(l_ptr);
|
||||
l_ptr->state = RESET_RESET;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
|
||||
0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
default:
|
||||
pr_debug("%s%u in WW state\n", link_unk_evt, event);
|
||||
|
@ -568,46 +539,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
|
|||
case TRAFFIC_MSG_EVT:
|
||||
case ACTIVATE_MSG:
|
||||
l_ptr->state = WORKING_WORKING;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
l_ptr->silent_intv_cnt = 0;
|
||||
break;
|
||||
case RESET_MSG:
|
||||
pr_debug("%s<%s>, requested by peer while probing\n",
|
||||
link_rst_msg, l_ptr->name);
|
||||
tipc_link_reset(l_ptr);
|
||||
l_ptr->state = RESET_RESET;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
|
||||
0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
case TIMEOUT_EVT:
|
||||
if (l_ptr->next_in_no != l_ptr->checkpoint) {
|
||||
case SILENCE_EVT:
|
||||
if (!l_ptr->silent_intv_cnt) {
|
||||
l_ptr->state = WORKING_WORKING;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
l_ptr->checkpoint = l_ptr->next_in_no;
|
||||
if (tipc_bclink_acks_missing(l_ptr->owner)) {
|
||||
if (tipc_bclink_acks_missing(l_ptr->owner))
|
||||
tipc_link_proto_xmit(l_ptr, STATE_MSG,
|
||||
0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
}
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
|
||||
} else if (l_ptr->silent_intv_cnt <
|
||||
l_ptr->abort_limit) {
|
||||
tipc_link_proto_xmit(l_ptr, STATE_MSG,
|
||||
1, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv / 4);
|
||||
} else { /* Link has failed */
|
||||
pr_debug("%s<%s>, peer not responding\n",
|
||||
link_rst_msg, l_ptr->name);
|
||||
tipc_link_reset(l_ptr);
|
||||
l_ptr->state = RESET_UNKNOWN;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
tipc_link_proto_xmit(l_ptr, RESET_MSG,
|
||||
0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
@ -623,31 +581,22 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
|
|||
if (other && link_working_unknown(other))
|
||||
break;
|
||||
l_ptr->state = WORKING_WORKING;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
link_activate(l_ptr);
|
||||
tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
if (l_ptr->owner->working_links == 1)
|
||||
tipc_link_sync_xmit(l_ptr);
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
case RESET_MSG:
|
||||
l_ptr->state = RESET_RESET;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
|
||||
1, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
case STARTING_EVT:
|
||||
l_ptr->flags |= LINK_STARTED;
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
link_set_timer(l_ptr, timer_intv);
|
||||
break;
|
||||
case TIMEOUT_EVT:
|
||||
case SILENCE_EVT:
|
||||
tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
default:
|
||||
pr_err("%s%u in RU state\n", link_unk_evt, event);
|
||||
|
@ -661,21 +610,16 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
|
|||
if (other && link_working_unknown(other))
|
||||
break;
|
||||
l_ptr->state = WORKING_WORKING;
|
||||
l_ptr->fsm_msg_cnt = 0;
|
||||
link_activate(l_ptr);
|
||||
tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
if (l_ptr->owner->working_links == 1)
|
||||
tipc_link_sync_xmit(l_ptr);
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
case RESET_MSG:
|
||||
break;
|
||||
case TIMEOUT_EVT:
|
||||
case SILENCE_EVT:
|
||||
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
|
||||
0, 0, 0, 0);
|
||||
l_ptr->fsm_msg_cnt++;
|
||||
link_set_timer(l_ptr, cont_intv);
|
||||
break;
|
||||
default:
|
||||
pr_err("%s%u in RR state\n", link_unk_evt, event);
|
||||
|
@@ -701,53 +645,58 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 {
 	struct tipc_msg *msg = buf_msg(skb_peek(list));
 	unsigned int maxwin = link->window;
-	unsigned int imp = msg_importance(msg);
+	unsigned int i, imp = msg_importance(msg);
 	uint mtu = link->mtu;
-	uint ack = mod(link->next_in_no - 1);
-	uint seqno = link->next_out_no;
-	uint bc_last_in = link->owner->bclink.last_in;
+	u16 ack = mod(link->rcv_nxt - 1);
+	u16 seqno = link->snd_nxt;
+	u16 bc_last_in = link->owner->bclink.last_in;
 	struct tipc_media_addr *addr = &link->media_addr;
 	struct sk_buff_head *transmq = &link->transmq;
 	struct sk_buff_head *backlogq = &link->backlogq;
-	struct sk_buff *skb, *tmp;
-
-	/* Match backlog limit against msg importance: */
-	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
-		return link_schedule_user(link, list);
+	struct sk_buff *skb, *bskb;
+
+	/* Match msg importance against this and all higher backlog limits: */
+	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
+			return link_schedule_user(link, list);
+	}
 	if (unlikely(msg_size(msg) > mtu)) {
 		__skb_queue_purge(list);
 		return -EMSGSIZE;
 	}
 	/* Prepare each packet for sending, and add to relevant queue: */
-	skb_queue_walk_safe(list, skb, tmp) {
-		__skb_unlink(skb, list);
+	while (skb_queue_len(list)) {
+		skb = skb_peek(list);
 		msg = buf_msg(skb);
 		msg_set_seqno(msg, seqno);
 		msg_set_ack(msg, ack);
 		msg_set_bcast_ack(msg, bc_last_in);
 
 		if (likely(skb_queue_len(transmq) < maxwin)) {
+			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			tipc_bearer_send(net, link->bearer_id, skb, addr);
 			link->rcv_unacked = 0;
 			seqno++;
 			continue;
 		}
-		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
+			kfree_skb(__skb_dequeue(list));
 			link->stats.sent_bundled++;
 			continue;
 		}
-		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
+		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
+			kfree_skb(__skb_dequeue(list));
+			__skb_queue_tail(backlogq, bskb);
+			link->backlog[msg_importance(buf_msg(bskb))].len++;
 			link->stats.sent_bundled++;
 			link->stats.sent_bundles++;
-			imp = msg_importance(buf_msg(skb));
 			continue;
 		}
-		__skb_queue_tail(backlogq, skb);
-		link->backlog[imp].len++;
-		seqno++;
+		link->backlog[imp].len += skb_queue_len(list);
+		skb_queue_splice_tail_init(list, backlogq);
 	}
-	link->next_out_no = seqno;
+	link->snd_nxt = seqno;
 	return 0;
 }
 
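The rewritten admission check in __tipc_link_xmit() above no longer tests only the backlog level of the message's own importance; it walks that level and every higher one before letting the message into the backlog, so a flood of low-priority traffic cannot starve more important senders sharing the queue. The sketch below is a hedged, userspace-only illustration of that rule; the enum names and limits are invented for the example and are not the kernel's TIPC_*_IMPORTANCE constants or real window-derived thresholds.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative importance levels: higher index = more important traffic. */
enum { IMP_LOW, IMP_MEDIUM, IMP_HIGH, IMP_CRITICAL, IMP_SYSTEM, IMP_LEVELS };

struct backlog_slot {
	unsigned int len;	/* packets currently queued at this level */
	unsigned int limit;	/* congestion threshold for this level */
};

/* Mirror of the new admission rule: a message may only enter the backlog
 * if its own level AND every higher level are still below their limits;
 * otherwise the sender is put to sleep (link_schedule_user in the kernel).
 */
static bool backlog_admits(const struct backlog_slot *bl, int imp)
{
	for (int i = imp; i < IMP_LEVELS; i++) {
		if (bl[i].len >= bl[i].limit)
			return false;
	}
	return true;
}

int main(void)
{
	/* Example thresholds only. */
	struct backlog_slot bl[IMP_LEVELS] = {
		{ .len = 10, .limit = 50 },	/* LOW */
		{ .len = 45, .limit = 50 },	/* MEDIUM */
		{ .len = 50, .limit = 50 },	/* HIGH: full */
		{ .len =  0, .limit = 50 },	/* CRITICAL */
		{ .len =  0, .limit = 50 },	/* SYSTEM */
	};

	/* LOW is itself below its limit, but HIGH above it is full,
	 * so the low-priority message must wait as well.
	 */
	printf("low admitted:      %d\n", backlog_admits(bl, IMP_LOW));      /* 0 */
	printf("critical admitted: %d\n", backlog_admits(bl, IMP_CRITICAL)); /* 1 */
	return 0;
}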
@@ -877,7 +826,8 @@ void tipc_link_push_packets(struct tipc_link *link)
 {
 	struct sk_buff *skb;
 	struct tipc_msg *msg;
-	unsigned int ack = mod(link->next_in_no - 1);
+	u16 seqno = link->snd_nxt;
+	u16 ack = mod(link->rcv_nxt - 1);
 
 	while (skb_queue_len(&link->transmq) < link->window) {
 		skb = __skb_dequeue(&link->backlogq);
@@ -886,12 +836,15 @@ void tipc_link_push_packets(struct tipc_link *link)
 		msg = buf_msg(skb);
 		link->backlog[msg_importance(msg)].len--;
 		msg_set_ack(msg, ack);
+		msg_set_seqno(msg, seqno);
+		seqno = mod(seqno + 1);
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		link->rcv_unacked = 0;
 		__skb_queue_tail(&link->transmq, skb);
 		tipc_bearer_send(link->owner->net, link->bearer_id,
 				 skb, &link->media_addr);
 	}
+	link->snd_nxt = seqno;
 }
 
 void tipc_link_reset_all(struct tipc_node *node)
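Another effect visible in the tipc_link_push_packets() hunk above is that packets waiting in the backlog are no longer pre-numbered: the sequence number is stamped with msg_set_seqno() only when a packet moves to the transmit queue, and link->snd_nxt is written back once after the loop. The toy model below (plain arrays instead of sk_buff queues, an illustrative window size, all names invented for the example) sketches that flow, including the 16-bit wrap.

#include <stdint.h>
#include <stdio.h>

#define WINDOW 3	/* illustrative send window, not TIPC's default */

struct pkt { uint16_t seqno; };

/* Sketch of the reworked push flow: unnumbered backlog packets receive
 * their sequence number only at the moment they enter the transmit
 * queue; the caller's snd_nxt is updated once at the end.
 */
static uint16_t push_packets(struct pkt *backlog, int *backlog_len,
			     struct pkt *transmq, int *transmq_len,
			     uint16_t snd_nxt)
{
	uint16_t seqno = snd_nxt;

	while (*transmq_len < WINDOW && *backlog_len > 0) {
		struct pkt p = backlog[0];		/* __skb_dequeue(backlogq) */

		/* shift the toy backlog (a real queue just unlinks the head) */
		for (int i = 1; i < *backlog_len; i++)
			backlog[i - 1] = backlog[i];
		(*backlog_len)--;

		p.seqno = seqno;			/* msg_set_seqno() at xmit time */
		seqno = (uint16_t)(seqno + 1);		/* mod(seqno + 1) */
		transmq[(*transmq_len)++] = p;		/* queue on transmq and send */
	}
	return seqno;					/* becomes link->snd_nxt */
}

int main(void)
{
	struct pkt backlog[5] = { {0} }, transmq[WINDOW];
	int blen = 5, tlen = 0;
	uint16_t snd_nxt = 65534;			/* close to wrap-around */

	snd_nxt = push_packets(backlog, &blen, transmq, &tlen, snd_nxt);
	for (int i = 0; i < tlen; i++)
		printf("sent seqno %u\n", transmq[i].seqno);	/* 65534 65535 0 */
	printf("snd_nxt is now %u, %d packets still backlogged\n", snd_nxt, blen);
	return 0;
}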
@ -964,13 +917,13 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
|
|||
msg = buf_msg(skb);
|
||||
|
||||
/* Detect repeated retransmit failures */
|
||||
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
|
||||
if (l_ptr->last_retransm == msg_seqno(msg)) {
|
||||
if (++l_ptr->stale_count > 100) {
|
||||
link_retransmit_failure(l_ptr, skb);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
l_ptr->last_retransmitted = msg_seqno(msg);
|
||||
l_ptr->last_retransm = msg_seqno(msg);
|
||||
l_ptr->stale_count = 1;
|
||||
}
|
||||
|
||||
|
@ -978,7 +931,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
|
|||
if (!retransmits)
|
||||
break;
|
||||
msg = buf_msg(skb);
|
||||
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
||||
msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
|
||||
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
||||
tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
|
||||
&l_ptr->media_addr);
|
||||
|
@ -1001,11 +954,11 @@ static bool link_synch(struct tipc_link *l)
|
|||
goto synched;
|
||||
|
||||
/* Was last pre-synch packet added to input queue ? */
|
||||
if (less_eq(pl->next_in_no, l->synch_point))
|
||||
if (less_eq(pl->rcv_nxt, l->synch_point))
|
||||
return false;
|
||||
|
||||
/* Is it still in the input queue ? */
|
||||
post_synch = mod(pl->next_in_no - l->synch_point) - 1;
|
||||
post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
|
||||
if (skb_queue_len(&pl->inputq) > post_synch)
|
||||
return false;
|
||||
synched:
|
||||
|
@ -1016,13 +969,13 @@ synched:
|
|||
static void link_retrieve_defq(struct tipc_link *link,
|
||||
struct sk_buff_head *list)
|
||||
{
|
||||
u32 seq_no;
|
||||
u16 seq_no;
|
||||
|
||||
if (skb_queue_empty(&link->deferdq))
|
||||
return;
|
||||
|
||||
seq_no = buf_seqno(skb_peek(&link->deferdq));
|
||||
if (seq_no == mod(link->next_in_no))
|
||||
if (seq_no == link->rcv_nxt)
|
||||
skb_queue_splice_tail_init(&link->deferdq, list);
|
||||
}
|
||||
|
||||
|
@ -1043,8 +996,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
|
|||
struct tipc_link *l_ptr;
|
||||
struct sk_buff *skb1, *tmp;
|
||||
struct tipc_msg *msg;
|
||||
u32 seq_no;
|
||||
u32 ackd;
|
||||
u16 seq_no;
|
||||
u16 ackd;
|
||||
u32 released;
|
||||
|
||||
skb2list(skb, &head);
|
||||
|
@ -1137,18 +1090,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
|
|||
}
|
||||
|
||||
/* Link is now in state WORKING_WORKING */
|
||||
if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
|
||||
if (unlikely(seq_no != l_ptr->rcv_nxt)) {
|
||||
link_handle_out_of_seq_msg(l_ptr, skb);
|
||||
link_retrieve_defq(l_ptr, &head);
|
||||
skb = NULL;
|
||||
goto unlock;
|
||||
}
|
||||
l_ptr->silent_intv_cnt = 0;
|
||||
|
||||
/* Synchronize with parallel link if applicable */
|
||||
if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
|
||||
if (!link_synch(l_ptr))
|
||||
goto unlock;
|
||||
}
|
||||
l_ptr->next_in_no++;
|
||||
l_ptr->rcv_nxt++;
|
||||
if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
|
||||
link_retrieve_defq(l_ptr, &head);
|
||||
if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
|
||||
|
@ -1268,7 +1223,7 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
|
|||
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
|
||||
{
|
||||
struct sk_buff *skb1;
|
||||
u32 seq_no = buf_seqno(skb);
|
||||
u16 seq_no = buf_seqno(skb);
|
||||
|
||||
/* Empty queue ? */
|
||||
if (skb_queue_empty(list)) {
|
||||
|
@ -1284,7 +1239,7 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
|
|||
|
||||
/* Locate insertion point in queue, then insert; discard if duplicate */
|
||||
skb_queue_walk(list, skb1) {
|
||||
u32 curr_seqno = buf_seqno(skb1);
|
||||
u16 curr_seqno = buf_seqno(skb1);
|
||||
|
||||
if (seq_no == curr_seqno) {
|
||||
kfree_skb(skb);
|
||||
|
@ -1312,14 +1267,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
|
|||
return;
|
||||
}
|
||||
|
||||
/* Record OOS packet arrival (force mismatch on next timeout) */
|
||||
l_ptr->checkpoint--;
|
||||
/* Record OOS packet arrival */
|
||||
l_ptr->silent_intv_cnt = 0;
|
||||
|
||||
/*
|
||||
* Discard packet if a duplicate; otherwise add it to deferred queue
|
||||
* and notify peer of gap as per protocol specification
|
||||
*/
|
||||
if (less(seq_no, mod(l_ptr->next_in_no))) {
|
||||
if (less(seq_no, l_ptr->rcv_nxt)) {
|
||||
l_ptr->stats.duplicates++;
|
||||
kfree_skb(buf);
|
||||
return;
|
||||
|
@ -1344,6 +1299,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
|||
struct tipc_msg *msg = l_ptr->pmsg;
|
||||
u32 msg_size = sizeof(l_ptr->proto_msg);
|
||||
int r_flag;
|
||||
u16 last_rcv;
|
||||
|
||||
/* Don't send protocol message during link failover */
|
||||
if (l_ptr->flags & LINK_FAILINGOVER)
|
||||
|
@ -1360,7 +1316,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
|||
msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
|
||||
|
||||
if (msg_typ == STATE_MSG) {
|
||||
u32 next_sent = mod(l_ptr->next_out_no);
|
||||
u16 next_sent = l_ptr->snd_nxt;
|
||||
|
||||
if (!tipc_link_is_up(l_ptr))
|
||||
return;
|
||||
|
@ -1368,8 +1324,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
|||
next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
|
||||
msg_set_next_sent(msg, next_sent);
|
||||
if (!skb_queue_empty(&l_ptr->deferdq)) {
|
||||
u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
|
||||
gap = mod(rec - mod(l_ptr->next_in_no));
|
||||
last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
|
||||
gap = mod(last_rcv - l_ptr->rcv_nxt);
|
||||
}
|
||||
msg_set_seq_gap(msg, gap);
|
||||
if (gap)
|
||||
|
@ -1377,7 +1333,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
|||
msg_set_link_tolerance(msg, tolerance);
|
||||
msg_set_linkprio(msg, priority);
|
||||
msg_set_max_pkt(msg, l_ptr->mtu);
|
||||
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
||||
msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
|
||||
msg_set_probe(msg, probe_msg != 0);
|
||||
if (probe_msg)
|
||||
l_ptr->stats.sent_probes++;
|
||||
|
@ -1397,7 +1353,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
|
|||
msg_set_linkprio(msg, l_ptr->priority);
|
||||
msg_set_size(msg, msg_size);
|
||||
|
||||
msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
|
||||
msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
|
||||
|
||||
buf = tipc_buf_acquire(msg_size);
|
||||
if (!buf)
|
||||
|
@ -1496,17 +1452,15 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
|
|||
}
|
||||
|
||||
/* Record reception; force mismatch at next timeout: */
|
||||
l_ptr->checkpoint--;
|
||||
l_ptr->silent_intv_cnt = 0;
|
||||
|
||||
link_state_event(l_ptr, TRAFFIC_MSG_EVT);
|
||||
l_ptr->stats.recv_states++;
|
||||
if (link_reset_unknown(l_ptr))
|
||||
break;
|
||||
|
||||
if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
|
||||
rec_gap = mod(msg_next_sent(msg) -
|
||||
mod(l_ptr->next_in_no));
|
||||
}
|
||||
if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
|
||||
rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
|
||||
|
||||
if (msg_probe(msg))
|
||||
l_ptr->stats.recv_probes++;
|
||||
|
@ -1580,6 +1534,11 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
|
|||
|
||||
tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
|
||||
FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
|
||||
|
||||
skb_queue_walk(&l_ptr->backlogq, skb) {
|
||||
msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
|
||||
l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
|
||||
}
|
||||
skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
|
||||
tipc_link_purge_backlog(l_ptr);
|
||||
msgcount = skb_queue_len(&l_ptr->transmq);
|
||||
|
@ -1640,6 +1599,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
|
|||
struct tipc_msg tnl_hdr;
|
||||
struct sk_buff_head *queue = &link->transmq;
|
||||
int mcnt;
|
||||
u16 seqno;
|
||||
|
||||
tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
|
||||
SYNCH_MSG, INT_H_SIZE, link->addr);
|
||||
|
@ -1653,7 +1613,7 @@ tunnel_queue:
|
|||
struct tipc_msg *msg = buf_msg(skb);
|
||||
u32 len = msg_size(msg);
|
||||
|
||||
msg_set_ack(msg, mod(link->next_in_no - 1));
|
||||
msg_set_ack(msg, mod(link->rcv_nxt - 1));
|
||||
msg_set_bcast_ack(msg, link->owner->bclink.last_in);
|
||||
msg_set_size(&tnl_hdr, len + INT_H_SIZE);
|
||||
outskb = tipc_buf_acquire(len + INT_H_SIZE);
|
||||
|
@ -1671,6 +1631,11 @@ tunnel_queue:
|
|||
}
|
||||
if (queue == &link->backlogq)
|
||||
return;
|
||||
seqno = link->snd_nxt;
|
||||
skb_queue_walk(&link->backlogq, skb) {
|
||||
msg_set_seqno(buf_msg(skb), seqno);
|
||||
seqno = mod(seqno + 1);
|
||||
}
|
||||
queue = &link->backlogq;
|
||||
goto tunnel_queue;
|
||||
}
|
||||
|
@ -1742,8 +1707,8 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
|
|||
return;
|
||||
|
||||
l_ptr->tolerance = tol;
|
||||
l_ptr->cont_intv = msecs_to_jiffies(intv);
|
||||
l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
|
||||
l_ptr->keepalive_intv = msecs_to_jiffies(intv);
|
||||
l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
|
||||
}
|
||||
|
||||
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
|
||||
|
@ -1803,8 +1768,8 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
|
|||
static void link_reset_statistics(struct tipc_link *l_ptr)
|
||||
{
|
||||
memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
|
||||
l_ptr->stats.sent_info = l_ptr->next_out_no;
|
||||
l_ptr->stats.recv_info = l_ptr->next_in_no;
|
||||
l_ptr->stats.sent_info = l_ptr->snd_nxt;
|
||||
l_ptr->stats.recv_info = l_ptr->rcv_nxt;
|
||||
}
|
||||
|
||||
static void link_print(struct tipc_link *l_ptr, const char *str)
|
||||
|
@ -2037,9 +2002,9 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
|
|||
goto attr_msg_full;
|
||||
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
|
||||
goto attr_msg_full;
|
||||
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
|
||||
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
|
||||
goto attr_msg_full;
|
||||
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
|
||||
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
|
||||
goto attr_msg_full;
|
||||
|
||||
if (tipc_link_is_up(link))
|
||||
|
|
|
@ -107,30 +107,29 @@ struct tipc_stats {
|
|||
* @owner: pointer to peer node
|
||||
* @refcnt: reference counter for permanent references (owner node & timer)
|
||||
* @flags: execution state flags for link endpoint instance
|
||||
* @checkpoint: reference point for triggering link continuity checking
|
||||
* @peer_session: link session # being used by peer end of link
|
||||
* @peer_bearer_id: bearer id used by link's peer endpoint
|
||||
* @bearer_id: local bearer id used by link
|
||||
* @tolerance: minimum link continuity loss needed to reset link [in ms]
|
||||
* @cont_intv: link continuity testing interval
|
||||
* @keepalive_intv: link keepalive timer interval
|
||||
* @abort_limit: # of unacknowledged continuity probes needed to reset link
|
||||
* @state: current state of link FSM
|
||||
* @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
|
||||
* @silent_intv_cnt: # of timer intervals without any reception from peer
|
||||
* @proto_msg: template for control messages generated by link
|
||||
* @pmsg: convenience pointer to "proto_msg" field
|
||||
* @priority: current link priority
|
||||
* @net_plane: current link network plane ('A' through 'H')
|
||||
* @backlog_limit: backlog queue congestion thresholds (indexed by importance)
|
||||
* @exp_msg_count: # of tunnelled messages expected during link changeover
|
||||
* @reset_checkpoint: seq # of last acknowledged message at time of link reset
|
||||
* @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
|
||||
* @mtu: current maximum packet size for this link
|
||||
* @advertised_mtu: advertised own mtu when link is being established
|
||||
* @transmitq: queue for sent, non-acked messages
|
||||
* @backlogq: queue for messages waiting to be sent
|
||||
* @next_out_no: next sequence number to use for outbound messages
|
||||
* @snt_nxt: next sequence number to use for outbound messages
|
||||
* @last_retransmitted: sequence number of most recently retransmitted message
|
||||
* @stale_count: # of identical retransmit requests made by peer
|
||||
* @next_in_no: next sequence number to expect for inbound messages
|
||||
* @rcv_nxt: next sequence number to expect for inbound messages
|
||||
* @deferred_queue: deferred queue saved OOS b'cast message received from node
|
||||
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
|
||||
* @inputq: buffer queue for messages to be delivered upwards
|
||||
|
@ -151,15 +150,14 @@ struct tipc_link {
|
|||
|
||||
/* Management and link supervision data */
|
||||
unsigned int flags;
|
||||
u32 checkpoint;
|
||||
u32 peer_session;
|
||||
u32 peer_bearer_id;
|
||||
u32 bearer_id;
|
||||
u32 tolerance;
|
||||
unsigned long cont_intv;
|
||||
unsigned long keepalive_intv;
|
||||
u32 abort_limit;
|
||||
int state;
|
||||
u32 fsm_msg_cnt;
|
||||
u32 silent_intv_cnt;
|
||||
struct {
|
||||
unchar hdr[INT_H_SIZE];
|
||||
unchar body[TIPC_MAX_IF_NAME];
|
||||
|
@ -185,13 +183,13 @@ struct tipc_link {
|
|||
u16 len;
|
||||
u16 limit;
|
||||
} backlog[5];
|
||||
u32 next_out_no;
|
||||
u16 snd_nxt;
|
||||
u16 last_retransm;
|
||||
u32 window;
|
||||
u32 last_retransmitted;
|
||||
u32 stale_count;
|
||||
|
||||
/* Reception */
|
||||
u32 next_in_no;
|
||||
u16 rcv_nxt;
|
||||
u32 rcv_unacked;
|
||||
struct sk_buff_head deferdq;
|
||||
struct sk_buff_head inputq;
|
||||
|
@ -213,8 +211,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
|
|||
struct tipc_bearer *b_ptr,
|
||||
const struct tipc_media_addr *media_addr);
|
||||
void tipc_link_delete(struct tipc_link *link);
|
||||
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
|
||||
bool shutting_down);
|
||||
void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
|
||||
void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
|
||||
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
|
||||
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
|
||||
|
@ -223,7 +220,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr);
|
|||
void tipc_link_purge_queues(struct tipc_link *l_ptr);
|
||||
void tipc_link_reset_all(struct tipc_node *node);
|
||||
void tipc_link_reset(struct tipc_link *l_ptr);
|
||||
void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
|
||||
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
|
||||
u32 selector);
|
||||
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
|
||||
|
@ -247,39 +243,6 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
|
|||
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
|
||||
void link_prepare_wakeup(struct tipc_link *l);
|
||||
|
||||
/*
|
||||
* Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
|
||||
*/
|
||||
static inline u32 buf_seqno(struct sk_buff *buf)
|
||||
{
|
||||
return msg_seqno(buf_msg(buf));
|
||||
}
|
||||
|
||||
static inline u32 mod(u32 x)
|
||||
{
|
||||
return x & 0xffffu;
|
||||
}
|
||||
|
||||
static inline int less_eq(u32 left, u32 right)
|
||||
{
|
||||
return mod(right - left) < 32768u;
|
||||
}
|
||||
|
||||
static inline int more(u32 left, u32 right)
|
||||
{
|
||||
return !less_eq(left, right);
|
||||
}
|
||||
|
||||
static inline int less(u32 left, u32 right)
|
||||
{
|
||||
return less_eq(left, right) && (mod(right) != mod(left));
|
||||
}
|
||||
|
||||
static inline u32 lesser(u32 left, u32 right)
|
||||
{
|
||||
return less_eq(left, right) ? left : right;
|
||||
}
|
||||
|
||||
static inline u32 link_own_addr(struct tipc_link *l)
|
||||
{
|
||||
return msg_prevnode(l->pmsg);
|
||||
|
|
|
@ -331,16 +331,15 @@ error:
|
|||
|
||||
/**
|
||||
* tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
|
||||
* @bskb: the buffer to append to ("bundle")
|
||||
* @skb: buffer to be appended
|
||||
* @skb: the buffer to append to ("bundle")
|
||||
* @msg: message to be appended
|
||||
* @mtu: max allowable size for the bundle buffer
|
||||
* Consumes buffer if successful
|
||||
* Returns true if bundling could be performed, otherwise false
|
||||
*/
|
||||
bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
|
||||
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
|
||||
{
|
||||
struct tipc_msg *bmsg;
|
||||
struct tipc_msg *msg = buf_msg(skb);
|
||||
unsigned int bsz;
|
||||
unsigned int msz = msg_size(msg);
|
||||
u32 start, pad;
|
||||
|
@ -348,9 +347,9 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
|
|||
|
||||
if (likely(msg_user(msg) == MSG_FRAGMENTER))
|
||||
return false;
|
||||
if (!bskb)
|
||||
if (!skb)
|
||||
return false;
|
||||
bmsg = buf_msg(bskb);
|
||||
bmsg = buf_msg(skb);
|
||||
bsz = msg_size(bmsg);
|
||||
start = align(bsz);
|
||||
pad = start - bsz;
|
||||
|
@ -359,18 +358,20 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
|
|||
return false;
|
||||
if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
|
||||
return false;
|
||||
if (likely(msg_user(bmsg) != MSG_BUNDLER))
|
||||
if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
|
||||
return false;
|
||||
if (unlikely(skb_tailroom(bskb) < (pad + msz)))
|
||||
if (unlikely(skb_tailroom(skb) < (pad + msz)))
|
||||
return false;
|
||||
if (unlikely(max < (start + msz)))
|
||||
return false;
|
||||
if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
|
||||
(msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
|
||||
return false;
|
||||
|
||||
skb_put(bskb, pad + msz);
|
||||
skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
|
||||
skb_put(skb, pad + msz);
|
||||
skb_copy_to_linear_data_offset(skb, start, msg, msz);
|
||||
msg_set_size(bmsg, start + msz);
|
||||
msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
|
||||
kfree_skb(skb);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -416,18 +417,18 @@ none:
|
|||
|
||||
/**
|
||||
* tipc_msg_make_bundle(): Create bundle buf and append message to its tail
|
||||
* @list: the buffer chain
|
||||
* @skb: buffer to be appended and replaced
|
||||
* @list: the buffer chain, where head is the buffer to replace/append
|
||||
* @skb: buffer to be created, appended to and returned in case of success
|
||||
* @msg: message to be appended
|
||||
* @mtu: max allowable size for the bundle buffer, inclusive header
|
||||
* @dnode: destination node for message. (Not always present in header)
|
||||
* Replaces buffer if successful
|
||||
* Returns true if success, otherwise false
|
||||
*/
|
||||
bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
|
||||
bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
|
||||
u32 mtu, u32 dnode)
|
||||
{
|
||||
struct sk_buff *bskb;
|
||||
struct sk_buff *_skb;
|
||||
struct tipc_msg *bmsg;
|
||||
struct tipc_msg *msg = buf_msg(*skb);
|
||||
u32 msz = msg_size(msg);
|
||||
u32 max = mtu - INT_H_SIZE;
|
||||
|
||||
|
@ -440,19 +441,23 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
|
|||
if (msz > (max / 2))
|
||||
return false;
|
||||
|
||||
bskb = tipc_buf_acquire(max);
|
||||
if (!bskb)
|
||||
_skb = tipc_buf_acquire(max);
|
||||
if (!_skb)
|
||||
return false;
|
||||
|
||||
skb_trim(bskb, INT_H_SIZE);
|
||||
bmsg = buf_msg(bskb);
|
||||
skb_trim(_skb, INT_H_SIZE);
|
||||
bmsg = buf_msg(_skb);
|
||||
tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
|
||||
INT_H_SIZE, dnode);
|
||||
if (msg_isdata(msg))
|
||||
msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
|
||||
else
|
||||
msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
|
||||
msg_set_seqno(bmsg, msg_seqno(msg));
|
||||
msg_set_ack(bmsg, msg_ack(msg));
|
||||
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
|
||||
tipc_msg_bundle(bskb, *skb, mtu);
|
||||
*skb = bskb;
|
||||
tipc_msg_bundle(_skb, msg, mtu);
|
||||
*skb = _skb;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -313,12 +313,12 @@ static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
|
|||
msg_set_bits(m, 1, 19, 0x3, n);
|
||||
}
|
||||
|
||||
static inline u32 msg_bcast_ack(struct tipc_msg *m)
|
||||
static inline u16 msg_bcast_ack(struct tipc_msg *m)
|
||||
{
|
||||
return msg_bits(m, 1, 0, 0xffff);
|
||||
}
|
||||
|
||||
static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
|
||||
static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
|
||||
{
|
||||
msg_set_bits(m, 1, 0, 0xffff, n);
|
||||
}
|
||||
|
@ -327,22 +327,22 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
|
|||
/*
|
||||
* Word 2
|
||||
*/
|
||||
static inline u32 msg_ack(struct tipc_msg *m)
|
||||
static inline u16 msg_ack(struct tipc_msg *m)
|
||||
{
|
||||
return msg_bits(m, 2, 16, 0xffff);
|
||||
}
|
||||
|
||||
static inline void msg_set_ack(struct tipc_msg *m, u32 n)
|
||||
static inline void msg_set_ack(struct tipc_msg *m, u16 n)
|
||||
{
|
||||
msg_set_bits(m, 2, 16, 0xffff, n);
|
||||
}
|
||||
|
||||
static inline u32 msg_seqno(struct tipc_msg *m)
|
||||
static inline u16 msg_seqno(struct tipc_msg *m)
|
||||
{
|
||||
return msg_bits(m, 2, 0, 0xffff);
|
||||
}
|
||||
|
||||
static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
|
||||
static inline void msg_set_seqno(struct tipc_msg *m, u16 n)
|
||||
{
|
||||
msg_set_bits(m, 2, 0, 0xffff, n);
|
||||
}
|
||||
|
@@ -352,18 +352,22 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
  */
 static inline u32 msg_importance(struct tipc_msg *m)
 {
-	if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+	int usr = msg_user(m);
+
+	if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
+		return usr;
+	if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
 		return msg_bits(m, 5, 13, 0x7);
-	if (likely(msg_isdata(m) && !msg_errcode(m)))
-		return msg_user(m);
 	return TIPC_SYSTEM_IMPORTANCE;
 }
 
 static inline void msg_set_importance(struct tipc_msg *m, u32 i)
 {
-	if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+	int usr = msg_user(m);
+
+	if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
 		msg_set_bits(m, 5, 13, 0x7, i);
-	else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+	else if (i < TIPC_SYSTEM_IMPORTANCE)
 		msg_set_user(m, i);
 	else
 		pr_warn("Trying to set illegal importance in message\n");
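As reworked above, msg_importance() returns the user field directly for ordinary data messages and falls back to a 3-bit field in header word 5 (bits 13..15, mask 0x7) for bundles and fragments, which is also what the bundle-aware backlog accounting in __tipc_link_xmit() relies on. The userspace sketch below mimics that bit field with stand-ins for msg_bits()/msg_set_bits(); the macro names and the host-endian header array are assumptions made for the example, not TIPC code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants mirroring the field used in the hunk above:
 * a 3-bit importance value at bits 13..15 of header word 5.
 */
#define IMP_WORD	5
#define IMP_POS		13
#define IMP_MASK	0x7u

/* Minimal stand-ins for msg_bits()/msg_set_bits() on a host-endian header. */
static uint32_t hdr_bits(const uint32_t *hdr, int w, int pos, uint32_t mask)
{
	return (hdr[w] >> pos) & mask;
}

static void hdr_set_bits(uint32_t *hdr, int w, int pos, uint32_t mask, uint32_t val)
{
	hdr[w] &= ~(mask << pos);
	hdr[w] |= (val & mask) << pos;
}

int main(void)
{
	uint32_t hdr[6] = { 0 };

	/* A bundle carrying TIPC_CRITICAL_IMPORTANCE (value 3 in the TIPC
	 * API) payloads records that level in the word-5 field so it can be
	 * recovered without opening the bundle.
	 */
	hdr_set_bits(hdr, IMP_WORD, IMP_POS, IMP_MASK, 3);
	assert(hdr_bits(hdr, IMP_WORD, IMP_POS, IMP_MASK) == 3);

	printf("stored importance level: %u\n",
	       hdr_bits(hdr, IMP_WORD, IMP_POS, IMP_MASK));
	return 0;
}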
@ -772,9 +776,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
|
|||
uint data_sz, u32 dnode, u32 onode,
|
||||
u32 dport, u32 oport, int errcode);
|
||||
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
|
||||
bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu);
|
||||
|
||||
bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode);
|
||||
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
|
||||
bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
|
||||
u32 mtu, u32 dnode);
|
||||
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
|
||||
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
|
||||
int offset, int dsz, int mtu, struct sk_buff_head *list);
|
||||
|
@ -782,6 +786,11 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
|
|||
int *err);
|
||||
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
|
||||
|
||||
static inline u16 buf_seqno(struct sk_buff *skb)
|
||||
{
|
||||
return msg_seqno(buf_msg(skb));
|
||||
}
|
||||
|
||||
/* tipc_skb_peek(): peek and reserve first buffer in list
|
||||
* @list: list to be peeked in
|
||||
* Returns pointer to first buffer in list, if any
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include "subscr.h"
|
||||
#include "socket.h"
|
||||
#include "node.h"
|
||||
#include "bcast.h"
|
||||
|
||||
static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
|
||||
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* net/tipc/node.c: TIPC node management routines
|
||||
*
|
||||
* Copyright (c) 2000-2006, 2012-2014, Ericsson AB
|
||||
* Copyright (c) 2000-2006, 2012-2015, Ericsson AB
|
||||
* Copyright (c) 2005-2006, 2010-2014, Wind River Systems
|
||||
* All rights reserved.
|
||||
*
|
||||
|
@ -39,6 +39,7 @@
|
|||
#include "node.h"
|
||||
#include "name_distr.h"
|
||||
#include "socket.h"
|
||||
#include "bcast.h"
|
||||
|
||||
static void node_lost_contact(struct tipc_node *n_ptr);
|
||||
static void node_established_contact(struct tipc_node *n_ptr);
|
||||
|
|
|
@ -45,8 +45,6 @@
|
|||
/* Out-of-range value for node signature */
|
||||
#define INVALID_NODE_SIG 0x10000
|
||||
|
||||
#define NODE_HTABLE_SIZE 512
|
||||
|
||||
/* Flags used to take different actions according to flag type
|
||||
* TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
|
||||
* TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
#include "link.h"
|
||||
#include "name_distr.h"
|
||||
#include "socket.h"
|
||||
#include "bcast.h"
|
||||
|
||||
#define SS_LISTENING -1 /* socket is listening */
|
||||
#define SS_READY -2 /* socket is connectionless */
|
||||
|
|