[NET] TIPC: Fix whitespace errors.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
YOSHIFUJI Hideaki 2007-02-09 23:25:21 +09:00 committed by David S. Miller
Parent cca5172a7e
Commit c430728526
44 changed files with 1200 additions and 1200 deletions

View file

@ -1,6 +1,6 @@
/*
* net/tipc/addr.c: TIPC address utility routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -48,10 +48,10 @@ u32 tipc_get_addr(void)
/**
* tipc_addr_domain_valid - validates a network domain address
*
* Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
*
* Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
* where Z, C, and N are non-zero and do not exceed the configured limits.
*
*
* Returns 1 if domain address is valid, otherwise 0
*/
@ -80,10 +80,10 @@ int tipc_addr_domain_valid(u32 addr)
/**
* tipc_addr_node_valid - validates a proposed network address for this node
*
* Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
*
* Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
* the configured limits.
*
*
* Returns 1 if address can be used, otherwise 0
*/
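
For reference, the <Z.C.N> notation above is TIPC's packed 32-bit network address. A minimal sketch of the packing, consistent with the tipc_addr()/tipc_zone()/tipc_cluster()/tipc_node() helpers of this era (8-bit zone, 12-bit cluster, 12-bit node); shown for context only, not part of this whitespace-only commit:

/* Sketch: pack/unpack a <Z.C.N> address (assumed 8/12/12-bit split) */
static inline u32 tipc_addr(unsigned int zone, unsigned int cluster,
                            unsigned int node)
{
        return (zone << 24) | (cluster << 12) | node;
}

static inline unsigned int tipc_zone(u32 addr)    { return addr >> 24; }
static inline unsigned int tipc_cluster(u32 addr) { return (addr >> 12) & 0xfff; }
static inline unsigned int tipc_node(u32 addr)    { return addr & 0xfff; }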

View file

@ -1,6 +1,6 @@
/*
* net/tipc/addr.h: Include file for TIPC address utility routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -100,8 +100,8 @@ static inline int addr_scope(u32 domain)
/**
* addr_domain - convert 2-bit scope value to equivalent message lookup domain
*
* Needed when address of a named message must be looked up a second time
*
* Needed when address of a named message must be looked up a second time
* after a network hop.
*/

View file

@ -1,6 +1,6 @@
/*
* net/tipc/bcast.c: TIPC broadcast code
*
*
* Copyright (c) 2004-2006, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, Wind River Systems
@ -59,15 +59,15 @@
* Loss rate for incoming broadcast frames; used to test retransmission code.
* Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
*/
#define TIPC_BCAST_LOSS_RATE 0
/**
* struct bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
* @secondary: pointer to secondary bearer
*
* Bearers must have same priority and same set of reachable destinations
*
* Bearers must have same priority and same set of reachable destinations
* to be paired.
*/
@ -84,7 +84,7 @@ struct bcbearer_pair {
* @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
* @remains: temporary node map used by tipc_bcbearer_send()
* @remains_new: temporary node map used tipc_bcbearer_send()
*
*
* Note: The fields labelled "temporary" are incorporated into the bearer
* to avoid consuming potentially limited stack space through the use of
* large local variables within multicast routines. Concurrent access is
@ -104,7 +104,7 @@ struct bcbearer {
* struct bclink - link used for broadcast messages
* @link: (non-standard) broadcast link structure
* @node: (non-standard) node structure representing b'cast link's peer node
*
*
* Handles sequence numbering, fragmentation, bundling, etc.
*/
@ -125,7 +125,7 @@ char tipc_bclink_name[] = "multicast-link";
static u32 buf_seqno(struct sk_buff *buf)
{
return msg_seqno(buf_msg(buf));
}
}
static u32 bcbuf_acks(struct sk_buff *buf)
{
@ -143,9 +143,9 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
}
/**
/**
* bclink_set_gap - set gap according to contents of current deferred pkt queue
*
*
* Called with 'node' locked, bc_lock unlocked
*/
@ -159,14 +159,14 @@ static void bclink_set_gap(struct node *n_ptr)
n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}
/**
/**
* bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
*
*
* This mechanism endeavours to prevent all nodes in network from trying
* to ACK or NACK at the same time.
*
*
* Note: TIPC uses a different trigger to distribute ACKs than it does to
* distribute NACKs, but tries to use the same spacing (divide by 16).
* distribute NACKs, but tries to use the same spacing (divide by 16).
*/
static int bclink_ack_allowed(u32 n)
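
The function body is elided by the hunk boundary below; as a hedged sketch of the staggering scheme described above (the actual body may differ), a node could react only when the packet count, taken modulo 16, equals its own broadcast tag:

/* Sketch only: spread ACK/NACK activity across nodes by reacting
 * once every 16 packets, offset by this node's broadcast tag
 * (tipc_own_tag), so different nodes fire at different points. */
static int bclink_ack_allowed(u32 n)
{
        return (n % 16) == tipc_own_tag;
}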
@ -175,11 +175,11 @@ static int bclink_ack_allowed(u32 n)
}
/**
/**
* bclink_retransmit_pkt - retransmit broadcast packets
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
*
*
* Called with bc_lock locked
*/
@ -189,16 +189,16 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
buf = bcl->first_out;
while (buf && less_eq(buf_seqno(buf), after)) {
buf = buf->next;
buf = buf->next;
}
tipc_link_retransmit(bcl, buf, mod(to - after));
}
/**
/**
* tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
* @n_ptr: node that sent acknowledgement info
* @acked: broadcast sequence # that has been acknowledged
*
*
* Node is locked, bc_lock unlocked.
*/
@ -244,9 +244,9 @@ void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
spin_unlock_bh(&bc_lock);
}
/**
/**
* bclink_send_ack - unicast an ACK msg
*
*
* tipc_net_lock and node lock set
*/
@ -258,9 +258,9 @@ static void bclink_send_ack(struct node *n_ptr)
tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
/**
/**
* bclink_send_nack- broadcast a NACK msg
*
*
* tipc_net_lock and node lock set
*/
@ -278,7 +278,7 @@ static void bclink_send_nack(struct node *n_ptr)
msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
TIPC_OK, INT_H_SIZE, n_ptr->addr);
msg_set_mc_netid(msg, tipc_net_id);
msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
msg_set_bcast_tag(msg, tipc_own_tag);
@ -292,17 +292,17 @@ static void bclink_send_nack(struct node *n_ptr)
bcl->stats.bearer_congs++;
}
/*
/*
* Ensure we doesn't send another NACK msg to the node
* until 16 more deferred messages arrive from it
* (i.e. helps prevent all nodes from NACK'ing at same time)
*/
n_ptr->bclink.nack_sync = tipc_own_tag;
}
}
/**
/**
* tipc_bclink_check_gap - send a NACK if a sequence gap exists
*
* tipc_net_lock and node lock set
@ -320,9 +320,9 @@ void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
bclink_send_nack(n_ptr);
}
/**
/**
* tipc_bclink_peek_nack - process a NACK msg meant for another node
*
*
* Only tipc_net_lock set.
*/
@ -349,7 +349,7 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
if (less_eq(my_to, gap_to))
n_ptr->bclink.gap_to = gap_after;
} else {
/*
/*
* Expand gap if missing bufs not in deferred queue:
*/
struct sk_buff *buf = n_ptr->bclink.deferred_head;
@ -371,7 +371,7 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
}
/*
* Some nodes may send a complementary NACK now:
*/
*/
if (bclink_ack_allowed(sender_tag + 1)) {
if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
bclink_send_nack(n_ptr);
@ -408,7 +408,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
/**
* tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
*
*
* tipc_net_lock is read_locked, no other locks set
*/
@ -425,7 +425,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
msg_dbg(msg, "<BC<<<");
if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
(msg_mc_netid(msg) != tipc_net_id))) {
buf_discard(buf);
return;
@ -443,7 +443,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
bcl->owner->next = NULL;
spin_unlock_bh(&bc_lock);
spin_unlock_bh(&bc_lock);
} else {
tipc_bclink_peek_nack(msg_destnode(msg),
msg_bcast_tag(msg),
@ -547,10 +547,10 @@ u32 tipc_bclink_acks_missing(struct node *n_ptr)
/**
* tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
*
*
* Send through as many bearers as necessary to reach all nodes
* that support TIPC multicasting.
*
*
* Returns 0 if packet sent successfully, non-zero if not
*/
@ -581,7 +581,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
send_count = 0;
/* Send buffer over bearers until all targets reached */
bcbearer->remains = tipc_cltr_bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@ -615,7 +615,7 @@ update:
bcbearer->remains = bcbearer->remains_new;
}
/* Unable to reach all targets */
bcbearer->bearer.publ.blocked = 1;
@ -682,7 +682,7 @@ void tipc_bcbearer_sort(void)
/**
* tipc_bcbearer_push - resolve bearer congestion
*
*
* Forces bclink to push out any unsent packets, until all packets are gone
* or congestion reoccurs.
* No locks set when function called
@ -714,27 +714,27 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
spin_lock_bh(&bc_lock);
tipc_printf(&pb, "Link <%s>\n"
" Window:%u packets\n",
" Window:%u packets\n",
bcl->name, bcl->queue_limit[0]);
tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
bcl->stats.recv_info,
bcl->stats.recv_fragments,
bcl->stats.recv_fragmented,
bcl->stats.recv_bundles,
bcl->stats.recv_bundled);
tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
bcl->stats.sent_info,
bcl->stats.sent_fragments,
bcl->stats.sent_fragmented,
bcl->stats.sent_fragmented,
bcl->stats.sent_bundles,
bcl->stats.sent_bundled);
tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
bcl->stats.recv_nacks,
bcl->stats.deferred_recv,
bcl->stats.deferred_recv,
bcl->stats.duplicates);
tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
bcl->stats.sent_nacks,
bcl->stats.sent_acks,
tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
bcl->stats.sent_nacks,
bcl->stats.sent_acks,
bcl->stats.retransmitted);
tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
bcl->stats.bearer_congs,
@ -778,7 +778,7 @@ int tipc_bclink_init(void)
bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
nomem:
warn("Multicast link creation failed, no memory\n");
warn("Multicast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
kfree(bclink);
@ -796,7 +796,7 @@ int tipc_bclink_init(void)
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->b_ptr = &bcbearer->bearer;
bcl->state = WORKING_WORKING;

View file

@ -1,6 +1,6 @@
/*
* net/tipc/bcast.h: Include file for TIPC broadcast code
*
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -102,7 +102,7 @@ static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
}
}
/**
/**
* nmap_remove - remove a node from a node map
*/
@ -190,7 +190,7 @@ static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
/**
* port_list_free - free dynamically created entries in port_list chain
*
*
* Note: First item is on stack, so it doesn't need to be released
*/

View file

@ -1,6 +1,6 @@
/*
* net/tipc/bearer.c: TIPC bearer code
*
*
* Copyright (c) 1996-2006, Ericsson AB
* Copyright (c) 2004-2006, Wind River Systems
* All rights reserved.
@ -52,7 +52,7 @@ struct bearer *tipc_bearers = NULL;
/**
* media_name_valid - validate media name
*
*
* Returns 1 if media name is valid, otherwise 0.
*/
@ -84,17 +84,17 @@ static struct media *media_find(const char *name)
/**
* tipc_register_media - register a media type
*
*
* Bearers for this media type must be activated separately at a later stage.
*/
int tipc_register_media(u32 media_type,
char *name,
int (*enable)(struct tipc_bearer *),
void (*disable)(struct tipc_bearer *),
int (*send_msg)(struct sk_buff *,
char *name,
int (*enable)(struct tipc_bearer *),
void (*disable)(struct tipc_bearer *),
int (*send_msg)(struct sk_buff *,
struct tipc_bearer *,
struct tipc_media_addr *),
struct tipc_media_addr *),
char *(*addr2str)(struct tipc_media_addr *a,
char *str_buf, int str_size),
struct tipc_media_addr *bcast_addr,
@ -121,11 +121,11 @@ int tipc_register_media(u32 media_type,
}
if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
(bearer_priority > TIPC_MAX_LINK_PRI)) {
warn("Media <%s> rejected, illegal priority (%u)\n", name,
warn("Media <%s> rejected, illegal priority (%u)\n", name,
bearer_priority);
goto exit;
}
if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
(link_tolerance > TIPC_MAX_LINK_TOL)) {
warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
link_tolerance);
@ -219,7 +219,7 @@ struct sk_buff *tipc_media_get_names(void)
read_lock_bh(&tipc_net_lock);
for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
strlen(m_ptr->name) + 1);
}
read_unlock_bh(&tipc_net_lock);
@ -230,11 +230,11 @@ struct sk_buff *tipc_media_get_names(void)
* bearer_name_validate - validate & (optionally) deconstruct bearer name
* @name - ptr to bearer name string
* @name_parts - ptr to area for bearer name components (or NULL if not needed)
*
*
* Returns 1 if bearer name is valid, otherwise 0.
*/
static int bearer_name_validate(const char *name,
static int bearer_name_validate(const char *name,
struct bearer_name *name_parts)
{
char name_copy[TIPC_MAX_BEARER_NAME];
@ -262,8 +262,8 @@ static int bearer_name_validate(const char *name,
/* validate component parts of bearer name */
if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
(if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
(if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
(strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
(strspn(if_name, tipc_alphabet) != (if_len - 1)))
return 0;
@ -336,8 +336,8 @@ struct sk_buff *tipc_bearer_get_names(void)
for (j = 0; j < MAX_BEARERS; j++) {
b_ptr = &tipc_bearers[j];
if (b_ptr->active && (b_ptr->media == m_ptr)) {
tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
b_ptr->publ.name,
tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
b_ptr->publ.name,
strlen(b_ptr->publ.name) + 1);
}
}
@ -401,8 +401,8 @@ void tipc_bearer_lock_push(struct bearer *b_ptr)
/*
* Interrupt enabling new requests after bearer congestion or blocking:
* See bearer_send().
* Interrupt enabling new requests after bearer congestion or blocking:
* See bearer_send().
*/
void tipc_continue(struct tipc_bearer *tb_ptr)
{
@ -417,9 +417,9 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
}
/*
* Schedule link for sending of messages after the bearer
* has been deblocked by 'continue()'. This method is called
* when somebody tries to send a message via this link while
* Schedule link for sending of messages after the bearer
* has been deblocked by 'continue()'. This method is called
* when somebody tries to send a message via this link while
* the bearer is congested. 'tipc_net_lock' is in read_lock here
* bearer.lock is busy
*/
@ -430,9 +430,9 @@ static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_p
}
/*
* Schedule link for sending of messages after the bearer
* has been deblocked by 'continue()'. This method is called
* when somebody tries to send a message via this link while
* Schedule link for sending of messages after the bearer
* has been deblocked by 'continue()'. This method is called
* when somebody tries to send a message via this link while
* the bearer is congested. 'tipc_net_lock' is in read_lock here,
* bearer.lock is free
*/
@ -468,7 +468,7 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
/**
* tipc_enable_bearer - enable bearer with the given name
*/
*/
int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
{
@ -490,7 +490,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
}
if (!tipc_addr_domain_valid(bcast_scope) ||
if (!tipc_addr_domain_valid(bcast_scope) ||
!in_scope(bcast_scope, tipc_own_addr)) {
warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
return -EINVAL;
@ -539,7 +539,7 @@ restart:
}
}
if (bearer_id >= MAX_BEARERS) {
warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
name, MAX_BEARERS);
goto failed;
}
@ -612,7 +612,7 @@ int tipc_block_bearer(const char *name)
/**
* bearer_disable -
*
*
* Note: This routine assumes caller holds tipc_net_lock.
*/

View file

@ -1,6 +1,6 @@
/*
* net/tipc/bearer.h: Include file for TIPC bearer code
*
*
* Copyright (c) 1996-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -58,14 +58,14 @@
* @type_id: TIPC media identifier [defined in tipc_bearer.h]
* @name: media name
*/
struct media {
int (*send_msg)(struct sk_buff *buf,
int (*send_msg)(struct sk_buff *buf,
struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest);
int (*enable_bearer)(struct tipc_bearer *b_ptr);
void (*disable_bearer)(struct tipc_bearer *b_ptr);
char *(*addr2str)(struct tipc_media_addr *a,
char *(*addr2str)(struct tipc_media_addr *a,
char *str_buf, int str_size);
struct tipc_media_addr bcast_addr;
int bcast;
@ -91,7 +91,7 @@ struct media {
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @nodes: indicates which nodes in cluster can be reached through bearer
*/
struct bearer {
struct tipc_bearer publ;
struct media *media;
@ -131,21 +131,21 @@ void tipc_bearer_lock_push(struct bearer *b_ptr);
/**
* tipc_bearer_send- sends buffer to destination over bearer
*
* tipc_bearer_send- sends buffer to destination over bearer
*
* Returns true (1) if successful, or false (0) if unable to send
*
*
* IMPORTANT:
* The media send routine must not alter the buffer being passed in
* as it may be needed for later retransmission!
*
* If the media send routine returns a non-zero value (indicating that
*
* If the media send routine returns a non-zero value (indicating that
* it was unable to send the buffer), it must:
* 1) mark the bearer as blocked,
* 2) call tipc_continue() once the bearer is able to send again.
* Media types that are unable to meet these two critera must ensure their
* send routine always returns success -- even if the buffer was not sent --
* and let TIPC's link code deal with the undelivered message.
* and let TIPC's link code deal with the undelivered message.
*/
static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
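
To illustrate the send-routine contract spelled out above, a hedged sketch of a media-specific send function; hw_queue_full() and hw_transmit() are hypothetical driver helpers, everything else follows the rules listed in the comment:

/* Illustrative only: a media send routine obeying the contract above.
 * hw_queue_full()/hw_transmit() are hypothetical driver helpers. */
static int example_send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
                            struct tipc_media_addr *dest)
{
        if (hw_queue_full(tb_ptr->usr_handle)) {
                tb_ptr->blocked = 1;    /* 1) mark the bearer as blocked */
                return 1;               /* TIPC's link code keeps 'buf' for later */
        }
        /* never alter 'buf' itself -- it may be needed for retransmission */
        hw_transmit(tb_ptr->usr_handle, skb_clone(buf, GFP_ATOMIC), dest);
        return 0;
}

Once the hardware can accept traffic again, such a driver would call tipc_continue(tb_ptr) so TIPC resumes pushing queued packets (rule 2 above).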

View file

@ -1,6 +1,6 @@
/*
* net/tipc/cluster.c: TIPC cluster management routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -56,7 +56,7 @@ struct cluster *tipc_cltr_create(u32 addr)
{
struct _zone *z_ptr;
struct cluster *c_ptr;
int max_nodes;
int max_nodes;
c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
if (c_ptr == NULL) {
@ -81,7 +81,7 @@ struct cluster *tipc_cltr_create(u32 addr)
tipc_local_nodes = c_ptr->nodes;
c_ptr->highest_slave = LOWEST_SLAVE - 1;
c_ptr->highest_node = 0;
z_ptr = tipc_zone_find(tipc_zone(addr));
if (!z_ptr) {
z_ptr = tipc_zone_create(addr);
@ -150,7 +150,7 @@ void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
/**
* tipc_cltr_select_router - select router to a cluster
*
*
* Uses deterministic and fair algorithm.
*/
@ -192,7 +192,7 @@ u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
/**
* tipc_cltr_select_node - select destination node within a remote cluster
*
*
* Uses deterministic and fair algorithm.
*/
@ -295,7 +295,7 @@ void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
msg_set_remote_node(msg, c_ptr->addr);
msg_set_type(msg, SLAVE_ROUTING_TABLE);
for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
if (c_ptr->nodes[n_num] &&
if (c_ptr->nodes[n_num] &&
tipc_node_has_active_links(c_ptr->nodes[n_num])) {
send = 1;
msg_set_dataoctet(msg, n_num);
@ -329,7 +329,7 @@ void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
msg_set_remote_node(msg, c_ptr->addr);
msg_set_type(msg, EXT_ROUTING_TABLE);
for (n_num = 1; n_num <= highest; n_num++) {
if (c_ptr->nodes[n_num] &&
if (c_ptr->nodes[n_num] &&
tipc_node_has_active_links(c_ptr->nodes[n_num])) {
send = 1;
msg_set_dataoctet(msg, n_num);
@ -360,7 +360,7 @@ void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
msg_set_remote_node(msg, c_ptr->addr);
msg_set_type(msg, LOCAL_ROUTING_TABLE);
for (n_num = 1; n_num <= highest; n_num++) {
if (c_ptr->nodes[n_num] &&
if (c_ptr->nodes[n_num] &&
tipc_node_has_active_links(c_ptr->nodes[n_num])) {
send = 1;
msg_set_dataoctet(msg, n_num);
@ -492,7 +492,7 @@ void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
}
/**
* tipc_cltr_multicast - multicast message to local nodes
* tipc_cltr_multicast - multicast message to local nodes
*/
static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
@ -554,9 +554,9 @@ void tipc_cltr_broadcast(struct sk_buff *buf)
buf_copy = skb_copy(buf, GFP_ATOMIC);
if (buf_copy == NULL)
goto exit;
msg_set_destnode(buf_msg(buf_copy),
msg_set_destnode(buf_msg(buf_copy),
n_ptr->addr);
tipc_link_send(buf_copy, n_ptr->addr,
tipc_link_send(buf_copy, n_ptr->addr,
n_ptr->addr);
}
}

View file

@ -1,6 +1,6 @@
/*
* net/tipc/cluster.h: Include file for TIPC cluster management routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -50,7 +50,7 @@
* @highest_node: id of highest numbered node within cluster
* @highest_slave: (used for secondary node support)
*/
struct cluster {
u32 addr;
struct _zone *owner;

View file

@ -1,6 +1,6 @@
/*
* net/tipc/config.c: TIPC configuration management code
*
*
* Copyright (c) 2002-2006, Ericsson AB
* Copyright (c) 2004-2006, Wind River Systems
* All rights reserved.
@ -86,7 +86,7 @@ struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
return buf;
}
int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
void *tlv_data, int tlv_data_size)
{
struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
@ -112,7 +112,7 @@ struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
if (buf) {
value_net = htonl(value);
tipc_cfg_append_tlv(buf, tlv_type, &value_net,
tipc_cfg_append_tlv(buf, tlv_type, &value_net,
sizeof(value_net));
}
return buf;
@ -182,7 +182,7 @@ int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
static void cfg_cmd_event(struct tipc_cmd_msg *msg,
char *data,
u32 sz,
u32 sz,
struct tipc_portid const *orig)
{
int rv = -EINVAL;
@ -192,7 +192,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
msg->cmd = ntohl(msg->cmd);
cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect,
cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect,
data, 0);
if (ntohl(msg->magic) != TIPC_MAGIC)
goto exit;
@ -295,7 +295,7 @@ static struct sk_buff *cfg_set_own_addr(void)
" (cannot change node address once assigned)");
tipc_own_addr = addr;
/*
/*
* Must release all spinlocks before calling start_net() because
* Linux version of TIPC calls eth_media_start() which calls
* register_netdevice_notifier() which may block!
@ -619,7 +619,7 @@ static void cfg_named_msg_event(void *userdata,
struct sk_buff **buf,
const unchar *msg,
u32 size,
u32 importance,
u32 importance,
struct tipc_portid const *orig,
struct tipc_name_seq const *dest)
{
@ -640,7 +640,7 @@ static void cfg_named_msg_event(void *userdata,
/* Generate reply for request (if can't, return request) */
rep_buf = tipc_cfg_do_cmd(orig->node,
ntohs(req_hdr->tcm_type),
ntohs(req_hdr->tcm_type),
msg + sizeof(*req_hdr),
size - sizeof(*req_hdr),
BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));

View file

@ -1,6 +1,6 @@
/*
* net/tipc/config.h: Include file for TIPC configuration service code
*
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -43,7 +43,7 @@
#include "link.h"
struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
void *tlv_data, int tlv_data_size);
struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
@ -68,8 +68,8 @@ static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
}
struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
const void *req_tlv_area, int req_tlv_space,
struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
const void *req_tlv_area, int req_tlv_space,
int headroom);
void tipc_cfg_link_event(u32 addr, char *name, int up);

View file

@ -89,7 +89,7 @@ int tipc_mode = TIPC_NOT_RUNNING;
int tipc_random;
atomic_t tipc_user_count = ATOMIC_INIT(0);
const char tipc_alphabet[] =
const char tipc_alphabet[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
/* configurable TIPC parameters */
@ -171,13 +171,13 @@ int tipc_core_start(void)
get_random_bytes(&tipc_random, sizeof(tipc_random));
tipc_mode = TIPC_NODE_MODE;
if ((res = tipc_handler_start()) ||
if ((res = tipc_handler_start()) ||
(res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
tipc_random)) ||
(res = tipc_reg_start()) ||
(res = tipc_nametbl_init()) ||
(res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
(res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
(res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
(res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
(res = tipc_netlink_start()) ||
(res = tipc_socket_init())) {
tipc_core_stop();
@ -191,7 +191,7 @@ static int __init tipc_init(void)
int res;
tipc_log_reinit(CONFIG_TIPC_LOG);
info("Activated (version " TIPC_MOD_VER
info("Activated (version " TIPC_MOD_VER
" compiled " __DATE__ " " __TIME__ ")\n");
tipc_own_addr = 0;
@ -207,9 +207,9 @@ static int __init tipc_init(void)
if ((res = tipc_core_start()))
err("Unable to start in single node mode\n");
else
else
info("Started in single node mode\n");
return res;
return res;
}
static void __exit tipc_exit(void)
@ -268,11 +268,11 @@ EXPORT_SYMBOL(tipc_available_nodes);
/* TIPC API for external bearers (see tipc_bearer.h) */
EXPORT_SYMBOL(tipc_block_bearer);
EXPORT_SYMBOL(tipc_continue);
EXPORT_SYMBOL(tipc_continue);
EXPORT_SYMBOL(tipc_disable_bearer);
EXPORT_SYMBOL(tipc_enable_bearer);
EXPORT_SYMBOL(tipc_recv_msg);
EXPORT_SYMBOL(tipc_register_media);
EXPORT_SYMBOL(tipc_register_media);
/* TIPC API for external APIs (see tipc_port.h) */

View file

@ -1,6 +1,6 @@
/*
* net/tipc/core.h: Include file for TIPC global declarations
*
*
* Copyright (c) 2005-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
@ -54,7 +54,7 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
@ -88,7 +88,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
/*
/*
* By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
* while DBG_OUTPUT is the null print buffer. These defaults can be changed
* here, or on a per .c file basis, by redefining these symbols. The following
@ -126,9 +126,9 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#define dump(fmt,arg...) do {} while (0)
/*
/*
* TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
* the null print buffer. Thes ensures that any system or debug messages
* the null print buffer. Thes ensures that any system or debug messages
* that are generated without using the above macros are handled correctly.
*/
@ -138,10 +138,10 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#undef DBG_OUTPUT
#define DBG_OUTPUT TIPC_NULL
#endif
#endif
/*
/*
* TIPC-specific error codes
*/
@ -204,11 +204,11 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
* @timer: pointer to timer structure
* @routine: pointer to routine to invoke when timer expires
* @argument: value to pass to routine when timer expires
*
*
* Timer must be initialized before use (and terminated when no longer needed).
*/
static inline void k_init_timer(struct timer_list *timer, Handler routine,
static inline void k_init_timer(struct timer_list *timer, Handler routine,
unsigned long argument)
{
dbg("initializing timer %p\n", timer);
@ -221,13 +221,13 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
* k_start_timer - start a timer
* @timer: pointer to timer structure
* @msec: time to delay (in ms)
*
*
* Schedules a previously initialized timer for later execution.
* If timer is already running, the new timeout overrides the previous request.
*
*
* To ensure the timer doesn't expire before the specified delay elapses,
* the amount of delay is rounded up when converting to the jiffies
* then an additional jiffy is added to account for the fact that
* then an additional jiffy is added to account for the fact that
* the starting time may be in the middle of the current jiffy.
*/
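
A minimal sketch of the conversion this describes; msecs_to_jiffies() already rounds up, and the extra jiffy covers starting in the middle of the current jiffy (consistent with core.h of this era, shown for context only):

/* Sketch of the rounding described above. */
static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
{
        mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
}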
@ -240,10 +240,10 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
/**
* k_cancel_timer - cancel a timer
* @timer: pointer to timer structure
*
* Cancels a previously initialized timer.
*
* Cancels a previously initialized timer.
* Can be called safely even if the timer is already inactive.
*
*
* WARNING: Must not be called when holding locks required by the timer's
* timeout routine, otherwise deadlock can occur on SMP systems!
*/
@ -257,11 +257,11 @@ static inline void k_cancel_timer(struct timer_list *timer)
/**
* k_term_timer - terminate a timer
* @timer: pointer to timer structure
*
*
* Prevents further use of a previously initialized timer.
*
*
* WARNING: Caller must ensure timer isn't currently running.
*
*
* (Do not "enhance" this routine to automatically cancel an active timer,
* otherwise deadlock can arise when a timeout routine calls k_term_timer.)
*/
@ -302,7 +302,7 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
* @size: message size (including TIPC header)
*
* Returns a new buffer with data pointers set to the specified size.
*
*
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
@ -334,4 +334,4 @@ static inline void buf_discard(struct sk_buff *skb)
kfree_skb(skb);
}
#endif
#endif

View file

@ -1,6 +1,6 @@
/*
* net/tipc/dbg.c: TIPC print buffer routines for debugging
*
*
* Copyright (c) 1996-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
@ -123,34 +123,34 @@ int tipc_printbuf_empty(struct print_buf *pb)
/**
* tipc_printbuf_validate - check for print buffer overflow
* @pb: pointer to print buffer structure
*
* Verifies that a print buffer has captured all data written to it.
*
* Verifies that a print buffer has captured all data written to it.
* If data has been lost, linearize buffer and prepend an error message
*
*
* Returns length of print buffer data string (including trailing NUL)
*/
int tipc_printbuf_validate(struct print_buf *pb)
{
char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
char *cp_buf;
struct print_buf cb;
char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
char *cp_buf;
struct print_buf cb;
if (!pb->buf)
return 0;
if (pb->buf[pb->size - 1] == 0) {
cp_buf = kmalloc(pb->size, GFP_ATOMIC);
if (cp_buf != NULL){
tipc_printbuf_init(&cb, cp_buf, pb->size);
tipc_printbuf_move(&cb, pb);
tipc_printbuf_move(pb, &cb);
kfree(cp_buf);
memcpy(pb->buf, err, strlen(err));
} else {
tipc_printbuf_reset(pb);
tipc_printf(pb, err);
}
cp_buf = kmalloc(pb->size, GFP_ATOMIC);
if (cp_buf != NULL){
tipc_printbuf_init(&cb, cp_buf, pb->size);
tipc_printbuf_move(&cb, pb);
tipc_printbuf_move(pb, &cb);
kfree(cp_buf);
memcpy(pb->buf, err, strlen(err));
} else {
tipc_printbuf_reset(pb);
tipc_printf(pb, err);
}
}
return (pb->crs - pb->buf + 1);
}
@ -159,7 +159,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
* tipc_printbuf_move - move print buffer contents to another print buffer
* @pb_to: pointer to destination print buffer structure
* @pb_from: pointer to source print buffer structure
*
*
* Current contents of destination print buffer (if any) are discarded.
* Source print buffer becomes empty if a successful move occurs.
*/
@ -234,13 +234,13 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
pb->crs = pb->buf + pb->size - 1;
} else {
strcpy(pb->buf, print_string + chars_left);
save_char = print_string[chars_left];
print_string[chars_left] = 0;
strcpy(pb->crs, print_string);
print_string[chars_left] = save_char;
pb->crs = pb->buf + chars_to_add - chars_left;
}
}
save_char = print_string[chars_left];
print_string[chars_left] = 0;
strcpy(pb->crs, print_string);
print_string[chars_left] = save_char;
pb->crs = pb->buf + chars_to_add - chars_left;
}
}
pb_next = pb->next;
pb->next = NULL;
pb = pb_next;
@ -249,7 +249,7 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
}
/**
* TIPC_TEE - perform next output operation on both print buffers
* TIPC_TEE - perform next output operation on both print buffers
* @b0: pointer to chain of print buffers (may be NULL)
* @b1: pointer to print buffer to add to chain
*
@ -350,7 +350,7 @@ void tipc_dump(struct print_buf *pb, const char *fmt, ...)
}
/**
* tipc_log_stop - free up TIPC log print buffer
* tipc_log_stop - free up TIPC log print buffer
*/
void tipc_log_stop(void)

View file

@ -1,6 +1,6 @@
/*
* net/tipc/dbg.h: Include file for TIPC print buffer routines
*
*
* Copyright (c) 1997-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.

View file

@ -1,6 +1,6 @@
/*
* net/tipc/discover.c
*
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
@ -56,10 +56,10 @@
#define CHECK_LINK_COUNT 306
#endif
/*
/*
* TODO: Most of the inter-cluster setup stuff should be
* rewritten, and be made conformant with specification.
*/
*/
/**
@ -80,10 +80,10 @@ struct link_req {
#if 0
int disc_create_link(const struct tipc_link_create *argv)
int disc_create_link(const struct tipc_link_create *argv)
{
/*
* Code for inter cluster link setup here
/*
* Code for inter cluster link setup here
*/
return TIPC_OK;
}
@ -93,16 +93,16 @@ int disc_create_link(const struct tipc_link_create *argv)
* disc_lost_link(): A link has lost contact
*/
void tipc_disc_link_event(u32 addr, char *name, int up)
void tipc_disc_link_event(u32 addr, char *name, int up)
{
if (in_own_cluster(addr))
return;
/*
* Code for inter cluster link setup here
/*
* Code for inter cluster link setup here
*/
}
/**
/**
* tipc_disc_init_msg - initialize a link setup message
* @type: message type (request or response)
* @req_links: number of links associated with message
@ -210,7 +210,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
dbg("creating link\n");
link = tipc_link_create(b_ptr, orig, &media_addr);
if (!link) {
spin_unlock_bh(&n_ptr->lock);
spin_unlock_bh(&n_ptr->lock);
return;
}
}
@ -224,10 +224,10 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
warn("Resetting link <%s>, peer interface address changed\n",
link->name);
memcpy(addr, &media_addr, sizeof(*addr));
tipc_link_reset(link);
tipc_link_reset(link);
}
link_fully_up = (link->state == WORKING_WORKING);
spin_unlock_bh(&n_ptr->lock);
spin_unlock_bh(&n_ptr->lock);
if ((type == DSC_RESP_MSG) || link_fully_up)
return;
rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
@ -244,23 +244,23 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
* @req: ptr to link request structure
*/
void tipc_disc_stop_link_req(struct link_req *req)
void tipc_disc_stop_link_req(struct link_req *req)
{
if (!req)
return;
k_cancel_timer(&req->timer);
k_term_timer(&req->timer);
buf_discard(req->buf);
kfree(req);
}
}
/**
* tipc_disc_update_link_req - update frequency of periodic link setup requests
* @req: ptr to link request structure
*/
void tipc_disc_update_link_req(struct link_req *req)
void tipc_disc_update_link_req(struct link_req *req)
{
if (!req)
return;
@ -278,16 +278,16 @@ void tipc_disc_update_link_req(struct link_req *req)
} else {
/* leave timer "as is" if haven't yet reached a "normal" rate */
}
}
}
/**
* disc_timeout - send a periodic link setup request
* @req: ptr to link request structure
*
*
* Called whenever a link setup request timer associated with a bearer expires.
*/
static void disc_timeout(struct link_req *req)
static void disc_timeout(struct link_req *req)
{
spin_lock_bh(&req->bearer->publ.lock);
@ -300,7 +300,7 @@ static void disc_timeout(struct link_req *req)
req->timer_intv *= 2;
if (req->timer_intv > TIPC_LINK_REQ_FAST)
req->timer_intv = TIPC_LINK_REQ_FAST;
if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
(req->bearer->nodes.count))
req->timer_intv = TIPC_LINK_REQ_SLOW;
}
@ -315,14 +315,14 @@ static void disc_timeout(struct link_req *req)
* @dest: destination address for request messages
* @dest_domain: network domain of node(s) which should respond to message
* @req_links: max number of desired links
*
*
* Returns pointer to link request structure, or NULL if unable to create.
*/
struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
const struct tipc_media_addr *dest,
u32 dest_domain,
u32 req_links)
u32 req_links)
{
struct link_req *req;
@ -342,5 +342,5 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
k_start_timer(&req->timer, req->timer_intv);
return req;
}
}

View file

@ -41,7 +41,7 @@
struct link_req;
struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
const struct tipc_media_addr *dest,
u32 dest_domain,
u32 req_links);

View file

@ -1,6 +1,6 @@
/*
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
*
* Copyright (c) 2001-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
@ -50,7 +50,7 @@
* @dev: ptr to associated Ethernet network device
* @tipc_packet_type: used in binding TIPC to Ethernet driver
*/
struct eth_bearer {
struct tipc_bearer *bearer;
struct net_device *dev;
@ -62,10 +62,10 @@ static int eth_started = 0;
static struct notifier_block notifier;
/**
* send_msg - send a TIPC message out over an Ethernet interface
* send_msg - send a TIPC message out over an Ethernet interface
*/
static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
struct tipc_media_addr *dest)
{
struct sk_buff *clone;
@ -76,7 +76,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
clone->nh.raw = clone->data;
dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
clone->dev = dev;
dev->hard_header(clone, dev, ETH_P_TIPC,
dev->hard_header(clone, dev, ETH_P_TIPC,
&dest->dev_addr.eth_addr,
dev->dev_addr, clone->len);
dev_queue_xmit(clone);
@ -86,12 +86,12 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
/**
* recv_msg - handle incoming TIPC message from an Ethernet interface
*
*
* Routine truncates any Ethernet padding/CRC appended to the message,
* and ensures message size matches actual length
*/
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
@ -99,14 +99,14 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
if (likely(eb_ptr->bearer)) {
if (likely(!dev->promiscuity) ||
!memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
!memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
size = msg_size((struct tipc_msg *)buf->data);
skb_trim(buf, size);
if (likely(buf->len == size)) {
buf->next = NULL;
tipc_recv_msg(buf, eb_ptr->bearer);
return TIPC_OK;
!memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
!memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
size = msg_size((struct tipc_msg *)buf->data);
skb_trim(buf, size);
if (likely(buf->len == size)) {
buf->next = NULL;
tipc_recv_msg(buf, eb_ptr->bearer);
return TIPC_OK;
}
}
}
@ -115,7 +115,7 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
}
/**
* enable_bearer - attach TIPC bearer to an Ethernet interface
* enable_bearer - attach TIPC bearer to an Ethernet interface
*/
static int enable_bearer(struct tipc_bearer *tb_ptr)
@ -127,7 +127,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find device with specified name */
while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
dev = dev->next;
}
if (!dev)
@ -154,14 +154,14 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
eb_ptr->bearer = tb_ptr;
tb_ptr->usr_handle = (void *)eb_ptr;
tb_ptr->mtu = dev->mtu;
tb_ptr->blocked = 0;
tb_ptr->blocked = 0;
tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
return 0;
}
/**
* disable_bearer - detach TIPC bearer from an Ethernet interface
* disable_bearer - detach TIPC bearer from an Ethernet interface
*
* We really should do dev_remove_pack() here, but this function can not be
* called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
@ -176,11 +176,11 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
/**
* recv_notification - handle device updates from OS
*
* Change the state of the Ethernet bearer (if any) associated with the
* Change the state of the Ethernet bearer (if any) associated with the
* specified device.
*/
static int recv_notification(struct notifier_block *nb, unsigned long evt,
static int recv_notification(struct notifier_block *nb, unsigned long evt,
void *dv)
{
struct net_device *dev = (struct net_device *)dv;
@ -194,7 +194,7 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
if (!eb_ptr->bearer)
return NOTIFY_DONE; /* bearer had been disabled */
eb_ptr->bearer->mtu = dev->mtu;
eb_ptr->bearer->mtu = dev->mtu;
switch (evt) {
case NETDEV_CHANGE:
@ -210,12 +210,12 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
tipc_block_bearer(eb_ptr->bearer->name);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
case NETDEV_CHANGEADDR:
tipc_block_bearer(eb_ptr->bearer->name);
tipc_continue(eb_ptr->bearer);
tipc_continue(eb_ptr->bearer);
break;
case NETDEV_UNREGISTER:
case NETDEV_CHANGENAME:
case NETDEV_CHANGENAME:
tipc_disable_bearer(eb_ptr->bearer->name);
break;
}
@ -227,7 +227,7 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
*/
static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
{
{
unchar *addr = (unchar *)&a->dev_addr;
if (str_size < 18)
@ -246,7 +246,7 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
*/
int tipc_eth_media_start(void)
{
{
struct tipc_media_addr bcast_addr;
int res;
@ -259,8 +259,8 @@ int tipc_eth_media_start(void)
memset(eth_bearers, 0, sizeof(eth_bearers));
res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
enable_bearer, disable_bearer, send_msg,
eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
enable_bearer, disable_bearer, send_msg,
eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
if (res)
return res;

Просмотреть файл

@ -1,6 +1,6 @@
/*
* net/tipc/handler.c: TIPC signal handling
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -95,7 +95,7 @@ static void process_signal_queue(unsigned long dummy)
int tipc_handler_start(void)
{
tipc_queue_item_cache =
tipc_queue_item_cache =
kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!tipc_queue_item_cache)
@ -110,7 +110,7 @@ int tipc_handler_start(void)
void tipc_handler_stop(void)
{
struct list_head *l, *n;
struct queue_item *item;
struct queue_item *item;
if (!handler_enabled)
return;

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
/*
* net/tipc/link.h: Include file for TIPC link code
*
*
* Copyright (c) 1995-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -45,8 +45,8 @@
#define PUSH_FAILED 1
#define PUSH_FINISHED 2
/*
* Link states
/*
* Link states
*/
#define WORKING_WORKING 560810u
@ -54,7 +54,7 @@
#define RESET_UNKNOWN 560812u
#define RESET_RESET 560813u
/*
/*
* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
*/
@ -74,7 +74,7 @@
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @b_ptr: pointer to bearer used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
* @tolerance: minimum link continuity loss needed to reset link [in ms]
* @continuity_interval: link continuity testing interval [in ms]
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
@ -110,7 +110,7 @@
* @stats: collects statistics regarding link activity
* @print_buf: print buffer used to log link activity
*/
struct link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
@ -143,18 +143,18 @@ struct link {
u32 exp_msg_count;
u32 reset_checkpoint;
/* Max packet negotiation */
u32 max_pkt;
u32 max_pkt_target;
u32 max_pkt_probes;
/* Max packet negotiation */
u32 max_pkt;
u32 max_pkt_target;
u32 max_pkt_probes;
/* Sending */
u32 out_queue_size;
struct sk_buff *first_out;
struct sk_buff *last_out;
u32 next_out_no;
u32 last_retransmitted;
u32 stale_count;
u32 last_retransmitted;
u32 stale_count;
/* Reception */
u32 next_in_no;
@ -174,7 +174,7 @@ struct link {
u32 long_msg_seq_no;
struct sk_buff *defragm_buf;
/* Statistics */
/* Statistics */
struct {
u32 sent_info; /* used in counting # sent packets */
u32 recv_info; /* used in counting # recv'd packets */
@ -239,9 +239,9 @@ void tipc_link_reset(struct link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
int tipc_link_send_sections_fast(struct port* sender,
int tipc_link_send_sections_fast(struct port* sender,
struct iovec const *msg_sect,
const u32 num_sect,
const u32 num_sect,
u32 destnode);
int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
@ -250,7 +250,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf);
int tipc_link_recv_fragment(struct sk_buff **pending,
struct sk_buff **fb,
struct tipc_msg **msg);
void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
u32 tolerance, u32 priority, u32 acked_mtu);
void tipc_link_push_queue(struct link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,

View file

@ -1,6 +1,6 @@
/*
* net/tipc/msg.c: TIPC message header routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.

View file

@ -1,6 +1,6 @@
/*
* net/tipc/msg.h: Include file for TIPC message header routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -56,10 +56,10 @@
/*
TIPC user data message header format, version 2
- Fundamental definitions available to privileged TIPC users
are located in tipc_msg.h.
- Remaining definitions available to TIPC internal users appear below.
- Remaining definitions available to TIPC internal users appear below.
*/
@ -75,7 +75,7 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
msg_set_word(m, w, (word |= (val << pos)));
}
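
For reference, the companion getter (not touched by this hunk) extracts a header field the same way msg_set_bits() above writes one; a sketch:

/* Sketch of the field getter paired with msg_set_bits(). */
static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
{
        return (msg_word(m, w) >> pos) & mask;
}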
/*
/*
* Word 0
*/
@ -84,7 +84,7 @@ static inline u32 msg_version(struct tipc_msg *m)
return msg_bits(m, 0, 29, 7);
}
static inline void msg_set_version(struct tipc_msg *m)
static inline void msg_set_version(struct tipc_msg *m)
{
msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION);
}
@ -99,47 +99,47 @@ static inline u32 msg_isdata(struct tipc_msg *m)
return (msg_user(m) <= DATA_CRITICAL);
}
static inline void msg_set_user(struct tipc_msg *m, u32 n)
static inline void msg_set_user(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 0, 25, 0xf, n);
}
static inline void msg_set_importance(struct tipc_msg *m, u32 i)
static inline void msg_set_importance(struct tipc_msg *m, u32 i)
{
msg_set_user(m, i);
}
static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
{
msg_set_bits(m, 0, 21, 0xf, n>>2);
}
static inline int msg_non_seq(struct tipc_msg *m)
static inline int msg_non_seq(struct tipc_msg *m)
{
return msg_bits(m, 0, 20, 1);
}
static inline void msg_set_non_seq(struct tipc_msg *m)
static inline void msg_set_non_seq(struct tipc_msg *m)
{
msg_set_bits(m, 0, 20, 1, 1);
}
static inline int msg_dest_droppable(struct tipc_msg *m)
static inline int msg_dest_droppable(struct tipc_msg *m)
{
return msg_bits(m, 0, 19, 1);
}
static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
{
msg_set_bits(m, 0, 19, 1, d);
}
static inline int msg_src_droppable(struct tipc_msg *m)
static inline int msg_src_droppable(struct tipc_msg *m)
{
return msg_bits(m, 0, 18, 1);
}
static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
{
msg_set_bits(m, 0, 18, 1, d);
}
@ -150,31 +150,31 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
}
/*
/*
* Word 1
*/
static inline void msg_set_type(struct tipc_msg *m, u32 n)
static inline void msg_set_type(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 1, 29, 0x7, n);
}
static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
{
msg_set_bits(m, 1, 25, 0xf, err);
}
static inline u32 msg_reroute_cnt(struct tipc_msg *m)
static inline u32 msg_reroute_cnt(struct tipc_msg *m)
{
return msg_bits(m, 1, 21, 0xf);
}
static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
{
msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
}
static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
{
msg_set_bits(m, 1, 21, 0xf, 0);
}
@ -184,12 +184,12 @@ static inline u32 msg_lookup_scope(struct tipc_msg *m)
return msg_bits(m, 1, 19, 0x3);
}
static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 1, 19, 0x3, n);
}
static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
{
u32 hsz = msg_hdr_sz(m);
char *to = (char *)&m->hdr[hsz/4];
@ -206,13 +206,13 @@ static inline u32 msg_bcast_ack(struct tipc_msg *m)
return msg_bits(m, 1, 0, 0xffff);
}
static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 1, 0, 0xffff, n);
}
/*
/*
* Word 2
*/
@ -221,7 +221,7 @@ static inline u32 msg_ack(struct tipc_msg *m)
return msg_bits(m, 2, 16, 0xffff);
}
static inline void msg_set_ack(struct tipc_msg *m, u32 n)
static inline void msg_set_ack(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 2, 16, 0xffff, n);
}
@ -231,48 +231,48 @@ static inline u32 msg_seqno(struct tipc_msg *m)
return msg_bits(m, 2, 0, 0xffff);
}
static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 2, 0, 0xffff, n);
}
/*
/*
* Words 3-10
*/
static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
{
msg_set_word(m, 3, a);
}
static inline void msg_set_origport(struct tipc_msg *m, u32 p)
static inline void msg_set_origport(struct tipc_msg *m, u32 p)
{
msg_set_word(m, 4, p);
}
static inline void msg_set_destport(struct tipc_msg *m, u32 p)
static inline void msg_set_destport(struct tipc_msg *m, u32 p)
{
msg_set_word(m, 5, p);
}
static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
{
msg_set_word(m, 5, p);
}
static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
{
msg_set_word(m, 6, a);
}
static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
{
msg_set_word(m, 7, a);
}
static inline int msg_is_dest(struct tipc_msg *m, u32 d)
static inline int msg_is_dest(struct tipc_msg *m, u32 d)
{
return(msg_short(m) || (msg_destnode(m) == d));
}
@ -284,7 +284,7 @@ static inline u32 msg_routed(struct tipc_msg *m)
return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
}
static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
{
msg_set_word(m, 8, n);
}
@ -309,17 +309,17 @@ static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
msg_set_word(m, 8, n);
}
static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
{
msg_set_word(m, 9, n);
}
static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
{
msg_set_namelower(m, n);
}
static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
{
msg_set_word(m, 10, n);
}
@ -329,7 +329,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
return (struct tipc_msg *)msg_data(m);
}
static inline void msg_expand(struct tipc_msg *m, u32 destnode)
static inline void msg_expand(struct tipc_msg *m, u32 destnode)
{
if (!msg_short(m))
return;
@ -344,7 +344,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
/*
TIPC internal message header format, version 2
1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
w0:|vers |msg usr|hdr sz |n|resrv| packet size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@ -372,9 +372,9 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
NB: CONN_MANAGER use data message format. LINK_CONFIG has own format.
*/
*/
/*
/*
* Internal users
*/
@ -390,7 +390,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
#define INT_H_SIZE 40
#define DSC_H_SIZE 40
/*
/*
* Connection management protocol messages
*/
@ -398,7 +398,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
#define CONN_PROBE_REPLY 1
#define CONN_ACK 2
/*
/*
* Name distributor messages
*/
@ -406,7 +406,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
#define WITHDRAWAL 1
/*
/*
* Word 1
*/
@ -425,13 +425,13 @@ static inline u32 msg_req_links(struct tipc_msg *m)
return msg_bits(m, 1, 16, 0xfff);
}
static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 1, 16, 0xfff, n);
}
/*
/*
* Word 2
*/
@ -440,7 +440,7 @@ static inline u32 msg_dest_domain(struct tipc_msg *m)
return msg_word(m, 2);
}
static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
{
msg_set_word(m, 2, n);
}
@ -460,13 +460,13 @@ static inline u32 msg_bcgap_to(struct tipc_msg *m)
return msg_bits(m, 2, 0, 0xffff);
}
static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 2, 0, 0xffff, n);
}
/*
/*
* Word 4
*/
@ -533,7 +533,7 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
msg_set_bits(m, 4, 0, 1, (n & 1));
}
/*
/*
* Word 5
*/
@ -603,7 +603,7 @@ static inline void msg_clear_redundant_link(struct tipc_msg *m)
}
/*
/*
* Word 9
*/
@ -627,12 +627,12 @@ static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 16, 0xffff, n);
}
static inline u32 msg_max_pkt(struct tipc_msg *m)
static inline u32 msg_max_pkt(struct tipc_msg *m)
{
return (msg_bits(m, 9, 16, 0xffff) * 4);
}
static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 9, 16, 0xffff, (n / 4));
}
@ -647,7 +647,7 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 0, 0xffff, n);
}
/*
/*
* Routing table message data
*/
@ -672,7 +672,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
msg_data(m)[pos + 4] = 1;
}
/*
/*
* Segmentation message types
*/
@ -680,7 +680,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
#define FRAGMENT 1
#define LAST_FRAGMENT 2
/*
/*
* Link management protocol message types
*/
@ -688,13 +688,13 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
#define RESET_MSG 1
#define ACTIVATE_MSG 2
/*
/*
* Changeover tunnel message types
*/
#define DUPLICATE_MSG 0
#define ORIGINAL_MSG 1
/*
/*
* Routing table message types
*/
#define EXT_ROUTING_TABLE 0
@ -703,7 +703,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
#define ROUTE_ADDITION 3
#define ROUTE_REMOVAL 4
/*
/*
* Config protocol message types
*/
@ -724,7 +724,7 @@ static inline u32 msg_tot_importance(struct tipc_msg *m)
}
static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
u32 err, u32 hsize, u32 destnode)
{
memset(m, 0, hsize);
@ -741,7 +741,7 @@ static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
}
}
/**
/**
* msg_calc_data_size - determine total data size for message
*/
@ -755,15 +755,15 @@ static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
return dsz;
}
/**
/**
* msg_build - create message using specified header and data
*
*
* Note: Caller must not hold any locks in case copy_from_user() is interrupted!
*
*
* Returns message data size or errno
*/
static inline int msg_build(struct tipc_msg *hdr,
static inline int msg_build(struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
int max_size, int usrmem, struct sk_buff** buf)
{
@ -789,11 +789,11 @@ static inline int msg_build(struct tipc_msg *hdr,
memcpy((*buf)->data, (unchar *)hdr, hsz);
for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
if (likely(usrmem))
res = !copy_from_user((*buf)->data + pos,
msg_sect[cnt].iov_base,
res = !copy_from_user((*buf)->data + pos,
msg_sect[cnt].iov_base,
msg_sect[cnt].iov_len);
else
memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
msg_sect[cnt].iov_len);
pos += msg_sect[cnt].iov_len;
}
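/*
 * Hedged usage sketch of msg_build() with a kernel-resident payload
 * (usrmem == 0, so the copy loop above takes the memcpy() branch rather
 * than copy_from_user()).  The header values simply mirror the msg_init()
 * call made by named_prepare_buf() in name_distr.c; the payload and
 * destination are placeholders, and this helper does not exist in TIPC.
 */
static inline int sketch_build_and_send(void *data, u32 len, u32 destnode)
{
	struct tipc_msg hdr;
	struct iovec iov = { .iov_base = data, .iov_len = len };
	struct sk_buff *buf;
	int sz;

	msg_init(&hdr, NAME_DISTRIBUTOR, PUBLICATION, TIPC_OK,
		 LONG_H_SIZE, destnode);
	sz = msg_build(&hdr, &iov, 1, MAX_MSG_SIZE, 0, &buf);
	if (!buf)
		return sz;			/* error code from msg_build() */
	tipc_link_send(buf, destnode, destnode);
	return sz;				/* message data size */
}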


@ -1,6 +1,6 @@
/*
* net/tipc/name_distr.c: TIPC name distribution code
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -53,15 +53,15 @@
* @upper: name sequence upper bound
* @ref: publishing port reference
* @key: publication key
*
*
* ===> All fields are stored in network byte order. <===
*
*
* First 3 fields identify (name or) name sequence being published.
* Reference field uniquely identifies port that published name sequence.
* Key field uniquely identifies publication, in the event a port has
* multiple publications of the same name sequence.
*
* Note: There is no field that identifies the publishing node because it is
*
* Note: There is no field that identifies the publishing node because it is
* the same for all items contained within a publication message.
*/
@ -74,12 +74,12 @@ struct distr_item {
};
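/**
 * sketch_publ_to_item - hedged sketch of the byte-order rule stated above
 *
 * The body of the real publ_to_item() is not visible in this hunk, so this
 * is an assumed equivalent rather than a copy: every distr_item field is
 * written in network byte order with htonl(), matching the ntohl() calls
 * made on reception in tipc_named_recv().
 */
static void sketch_publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type  = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->ref   = htonl(p->ref);
	i->key   = htonl(p->key);
}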
/**
* List of externally visible publications by this node --
* List of externally visible publications by this node --
* that is, all publications having scope > TIPC_NODE_SCOPE.
*/
static LIST_HEAD(publ_root);
static u32 publ_cnt = 0;
static u32 publ_cnt = 0;
/**
* publ_to_item - add publication info to a publication message
@ -101,12 +101,12 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
LONG_H_SIZE, dest);
msg_set_size(msg, LONG_H_SIZE + size);
}
@ -174,7 +174,7 @@ void tipc_named_node_up(unsigned long node)
u32 rest;
u32 max_item_buf;
read_lock_bh(&tipc_nametbl_lock);
read_lock_bh(&tipc_nametbl_lock);
max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
max_item_buf *= ITEM_SIZE;
rest = publ_cnt * ITEM_SIZE;
@ -183,7 +183,7 @@ void tipc_named_node_up(unsigned long node)
if (!buf) {
left = (rest <= max_item_buf) ? rest : max_item_buf;
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
buf = named_prepare_buf(PUBLICATION, left, node);
if (!buf) {
warn("Bulk publication distribution failure\n");
goto exit;
@ -196,20 +196,20 @@ void tipc_named_node_up(unsigned long node)
if (!left) {
msg_set_link_selector(buf_msg(buf), node);
dbg("tipc_named_node_up: sending publish msg to "
"<%u.%u.%u>\n", tipc_zone(node),
"<%u.%u.%u>\n", tipc_zone(node),
tipc_cluster(node), tipc_node(node));
tipc_link_send(buf, node, node);
buf = NULL;
}
}
exit:
read_unlock_bh(&tipc_nametbl_lock);
read_unlock_bh(&tipc_nametbl_lock);
}
/**
* node_is_down - remove publication associated with a failed node
*
* Invoked for each publication issued by a newly failed node.
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
* In rare cases the link may have come back up again when this
* function is called, and we have two items representing the same
@ -221,15 +221,15 @@ static void node_is_down(struct publication *publ)
{
struct publication *p;
write_lock_bh(&tipc_nametbl_lock);
dbg("node_is_down: withdrawing %u, %u, %u\n",
write_lock_bh(&tipc_nametbl_lock);
dbg("node_is_down: withdrawing %u, %u, %u\n",
publ->type, publ->lower, publ->upper);
publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
write_unlock_bh(&tipc_nametbl_lock);
if (p != publ) {
if (p != publ) {
err("Unable to remove publication from failed node\n"
"(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node, publ->ref, publ->key);
@ -251,27 +251,27 @@ void tipc_named_recv(struct sk_buff *buf)
struct distr_item *item = (struct distr_item *)msg_data(msg);
u32 count = msg_data_sz(msg) / ITEM_SIZE;
write_lock_bh(&tipc_nametbl_lock);
write_lock_bh(&tipc_nametbl_lock);
while (count--) {
if (msg_type(msg) == PUBLICATION) {
dbg("tipc_named_recv: got publication for %u, %u, %u\n",
dbg("tipc_named_recv: got publication for %u, %u, %u\n",
ntohl(item->type), ntohl(item->lower),
ntohl(item->upper));
publ = tipc_nametbl_insert_publ(ntohl(item->type),
publ = tipc_nametbl_insert_publ(ntohl(item->type),
ntohl(item->lower),
ntohl(item->upper),
TIPC_CLUSTER_SCOPE,
msg_orignode(msg),
msg_orignode(msg),
ntohl(item->ref),
ntohl(item->key));
if (publ) {
tipc_nodesub_subscribe(&publ->subscr,
msg_orignode(msg),
tipc_nodesub_subscribe(&publ->subscr,
msg_orignode(msg),
publ,
(net_ev_handler)node_is_down);
}
} else if (msg_type(msg) == WITHDRAWAL) {
dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
ntohl(item->type), ntohl(item->lower),
ntohl(item->upper));
publ = tipc_nametbl_remove_publ(ntohl(item->type),
@ -282,7 +282,7 @@ void tipc_named_recv(struct sk_buff *buf)
if (publ) {
tipc_nodesub_unsubscribe(&publ->subscr);
kfree(publ);
kfree(publ);
} else {
err("Unable to remove publication by node 0x%x\n"
"(type=%u, lower=%u, ref=%u, key=%u)\n",
@ -295,13 +295,13 @@ void tipc_named_recv(struct sk_buff *buf)
}
item++;
}
write_unlock_bh(&tipc_nametbl_lock);
write_unlock_bh(&tipc_nametbl_lock);
buf_discard(buf);
}
/**
* tipc_named_reinit - re-initialize local publication list
*
*
* This routine is called whenever TIPC networking is (re)enabled.
* All existing publications by this node that have "cluster" or "zone" scope
* are updated to reflect the node's current network address.
@ -312,11 +312,11 @@ void tipc_named_reinit(void)
{
struct publication *publ;
write_lock_bh(&tipc_nametbl_lock);
write_lock_bh(&tipc_nametbl_lock);
list_for_each_entry(publ, &publ_root, local_list) {
if (publ->node == tipc_own_addr)
break;
publ->node = tipc_own_addr;
}
write_unlock_bh(&tipc_nametbl_lock);
write_unlock_bh(&tipc_nametbl_lock);
}


@ -1,6 +1,6 @@
/*
* net/tipc/name_distr.h: Include file for TIPC name distribution code
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.


@ -1,6 +1,6 @@
/*
* net/tipc/name_table.c: TIPC name table code
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -65,7 +65,7 @@ struct sub_seq {
struct publication *zone_list;
};
/**
/**
* struct name_seq - container for all published instances of a name type
* @type: 32 bit 'type' value for name sequence
* @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
@ -89,7 +89,7 @@ struct name_seq {
/**
* struct name_table - table containing all existing port name publications
* @types: pointer to fixed-sized array of name sequence lists,
* @types: pointer to fixed-sized array of name sequence lists,
* accessed via hashing on 'type'; name sequence lists are *not* sorted
* @local_publ_count: number of publications issued by this node
*/
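/**
 * sketch_find_seq - hedged sketch of the hashed lookup described above
 *
 * Hash the 'type' into table.types[] and walk that bucket's (unsorted)
 * hlist.  The real lookup helper is not shown in this hunk, so details
 * such as the hash function itself are assumptions.
 */
static struct name_seq *sketch_find_seq(u32 type)
{
	struct hlist_head *seq_head = &table.types[hash(type)];
	struct hlist_node *seq_node;
	struct name_seq *ns;

	hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
		if (ns->type == type)
			return ns;
	}
	return NULL;
}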
@ -113,8 +113,8 @@ static int hash(int x)
* publ_create - create a publication structure
*/
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port_ref,
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port_ref,
u32 key)
{
struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
@ -148,7 +148,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
/**
* tipc_nameseq_create - create a name sequence structure for the specified 'type'
*
*
* Allocates a single sub-sequence structure and sets it to all 0's.
*/
@ -178,7 +178,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
/**
* nameseq_find_subseq - find sub-sequence (if any) matching a name instance
*
*
* Very time-critical, so binary searches through sub-sequence array.
*/
@ -204,7 +204,7 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
/**
* nameseq_locate_subseq - determine position of name instance in sub-sequence
*
*
* Returns index in sub-sequence array of the entry that contains the specified
* instance value; if no entry contains that value, returns the position
* where a new entry for it would be inserted in the array.
@ -232,7 +232,7 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
}
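/*
 * Hedged sketch of the behaviour described above: a binary search over the
 * sorted, non-overlapping sseqs[] array that either returns the index of the
 * sub-sequence containing 'instance' or the slot where a new one would be
 * inserted.  It mirrors, but is not copied from, nameseq_locate_subseq().
 */
static u32 sketch_locate_subseq(struct name_seq *nseq, u32 instance)
{
	u32 low = 0;
	u32 high = nseq->first_free;

	while (low < high) {
		u32 mid = (low + high) / 2;

		if (instance < nseq->sseqs[mid].lower)
			high = mid;		/* insertion point lies to the left */
		else if (instance > nseq->sseqs[mid].upper)
			low = mid + 1;		/* insertion point lies to the right */
		else
			return mid;		/* instance falls inside this sub-sequence */
	}
	return low;				/* position for a new entry */
}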
/**
* tipc_nameseq_insert_publ -
* tipc_nameseq_insert_publ -
*/
static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
@ -343,8 +343,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
}
}
/*
* Any subscriptions waiting for notification?
/*
* Any subscriptions waiting for notification?
*/
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
dbg("calling report_overlap()\n");
@ -352,7 +352,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
publ->lower,
publ->upper,
TIPC_PUBLISHED,
publ->ref,
publ->ref,
publ->node,
created_subseq);
}
@ -361,7 +361,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/**
* tipc_nameseq_remove_publ -
*
*
* NOTE: There may be cases where TIPC is asked to remove a publication
* that is not in the name table. For example, if another node issues a
* publication for a name sequence that overlaps an existing name sequence
@ -392,12 +392,12 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
prev = sseq->zone_list;
publ = sseq->zone_list->zone_list_next;
while ((publ->key != key) || (publ->ref != ref) ||
while ((publ->key != key) || (publ->ref != ref) ||
(publ->node && (publ->node != node))) {
prev = publ;
publ = publ->zone_list_next;
if (prev == sseq->zone_list) {
/* Prevent endless loop if publication not found */
return NULL;
@ -426,7 +426,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
err("Unable to de-list cluster publication\n"
"{%u%u}, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node,
publ->type, publ->lower, publ->node,
publ->ref, publ->key);
goto end_cluster;
}
@ -456,7 +456,7 @@ end_cluster:
err("Unable to de-list node publication\n"
"{%u%u}, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node,
publ->type, publ->lower, publ->node,
publ->ref, publ->key);
goto end_node;
}
@ -486,8 +486,8 @@ end_node:
tipc_subscr_report_overlap(s,
publ->lower,
publ->upper,
TIPC_WITHDRAWN,
publ->ref,
TIPC_WITHDRAWN,
publ->ref,
publ->node,
removed_subseq);
}
@ -517,8 +517,8 @@ void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
int must_report = 1;
do {
tipc_subscr_report_overlap(s,
sseq->lower,
tipc_subscr_report_overlap(s,
sseq->lower,
sseq->upper,
TIPC_PUBLISHED,
crs->ref,
@ -576,7 +576,7 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
scope, node, port, key);
}
struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
u32 node, u32 ref, u32 key)
{
struct publication *publ;
@ -676,14 +676,14 @@ not_found:
/**
* tipc_nametbl_mc_translate - find multicast destinations
*
*
* Creates list of all local ports that overlap the given multicast address;
* also determines if any off-node ports overlap.
*
* Note: Publications with a scope narrower than 'limit' are ignored.
* (i.e. local node-scope publications mustn't receive messages arriving
 * from another node, even if the multicast link brought it here)
*
*
* Returns non-zero if any off-node ports overlap
*/
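/*
 * Hedged sketch of the two tests implied above, pulled out as a helper that
 * does not exist in the real file: a publication is a multicast candidate
 * only if its scope is not narrower than 'limit' and its name range overlaps
 * the requested [lower, upper] range.
 */
static int sketch_mc_candidate(struct publication *publ,
			       u32 lower, u32 upper, u32 limit)
{
	if (publ->scope > limit)
		return 0;	/* scope too narrow (e.g. node-local) for this lookup */
	return (publ->lower <= upper) && (publ->upper >= lower);
}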
@ -730,7 +730,7 @@ exit:
* tipc_nametbl_publish_rsv - publish port name using a reserved name type
*/
int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
struct tipc_name_seq const *seq)
{
int res;
@ -745,13 +745,13 @@ int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
* tipc_nametbl_publish - add name publication to network name tables
*/
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
u32 scope, u32 port_ref, u32 key)
{
struct publication *publ;
if (table.local_publ_count >= tipc_max_publications) {
warn("Publication failed, local publication limit reached (%u)\n",
warn("Publication failed, local publication limit reached (%u)\n",
tipc_max_publications);
return NULL;
}
@ -808,22 +808,22 @@ void tipc_nametbl_subscribe(struct subscription *s)
u32 type = s->seq.type;
struct name_seq *seq;
write_lock_bh(&tipc_nametbl_lock);
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(type);
if (!seq) {
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
}
if (seq){
spin_lock_bh(&seq->lock);
dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
seq, type, s->seq.lower, s->seq.upper);
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
if (seq){
spin_lock_bh(&seq->lock);
dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
seq, type, s->seq.lower, s->seq.upper);
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
warn("Failed to create subscription for {%u,%u,%u}\n",
s->seq.type, s->seq.lower, s->seq.upper);
}
write_unlock_bh(&tipc_nametbl_lock);
}
write_unlock_bh(&tipc_nametbl_lock);
}
/**
@ -834,19 +834,19 @@ void tipc_nametbl_unsubscribe(struct subscription *s)
{
struct name_seq *seq;
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(s->seq.type);
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(s->seq.type);
if (seq != NULL){
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
spin_unlock_bh(&seq->lock);
if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
hlist_del_init(&seq->ns_list);
kfree(seq->sseqs);
kfree(seq);
}
}
write_unlock_bh(&tipc_nametbl_lock);
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
spin_unlock_bh(&seq->lock);
if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
hlist_del_init(&seq->ns_list);
kfree(seq->sseqs);
kfree(seq);
}
}
write_unlock_bh(&tipc_nametbl_lock);
}
@ -952,7 +952,7 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
* nametbl_list - print specified name table contents into the given buffer
*/
static void nametbl_list(struct print_buf *buf, u32 depth_info,
static void nametbl_list(struct print_buf *buf, u32 depth_info,
u32 type, u32 lowbound, u32 upbound)
{
struct hlist_head *seq_head;
@ -976,7 +976,7 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
for (i = 0; i < tipc_nametbl_size; i++) {
seq_head = &table.types[i];
hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
nameseq_list(seq, buf, depth, seq->type,
nameseq_list(seq, buf, depth, seq->type,
lowbound, upbound, i);
}
}
@ -991,7 +991,7 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
seq_head = &table.types[i];
hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
if (seq->type == type) {
nameseq_list(seq, buf, depth, type,
nameseq_list(seq, buf, depth, type,
lowbound, upbound, i);
break;
}
@ -1030,7 +1030,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
read_lock_bh(&tipc_nametbl_lock);
nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
ntohl(argv->lowbound), ntohl(argv->upbound));
read_unlock_bh(&tipc_nametbl_lock);
str_len = tipc_printbuf_validate(&b);


@ -1,6 +1,6 @@
/*
* net/tipc/name_table.h: Include file for TIPC name table code
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -64,7 +64,7 @@ struct port_list;
* @node_list: next matching name seq publication with >= node scope
* @cluster_list: next matching name seq publication with >= cluster scope
* @zone_list: next matching name seq publication with >= zone scope
*
*
* Note that the node list, cluster list, and zone list are circular lists.
*/
@ -89,16 +89,16 @@ extern rwlock_t tipc_nametbl_lock;
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
struct port_list *dports);
int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
struct tipc_name_seq const *seq);
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
u32 scope, u32 port_ref, u32 key);
int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 ref, u32 key);
struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
u32 node, u32 ref, u32 key);
void tipc_nametbl_subscribe(struct subscription *s);
void tipc_nametbl_unsubscribe(struct subscription *s);


@ -1,6 +1,6 @@
/*
* net/tipc/net.c: TIPC network routing code
*
*
* Copyright (c) 1995-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -49,63 +49,63 @@
#include "discover.h"
#include "config.h"
/*
/*
* The TIPC locking policy is designed to ensure a very fine locking
* granularity, permitting complete parallel access to individual
* port and node/link instances. The code consists of three major
* port and node/link instances. The code consists of three major
* locking domains, each protected with their own disjunct set of locks.
*
* 1: The routing hierarchy.
* Comprises the structures 'zone', 'cluster', 'node', 'link'
* and 'bearer'. The whole hierarchy is protected by a big
 * read/write lock, tipc_net_lock, to ensure that nothing is added
* or removed while code is accessing any of these structures.
* This layer must not be called from the two others while they
* Comprises the structures 'zone', 'cluster', 'node', 'link'
* and 'bearer'. The whole hierarchy is protected by a big
 * read/write lock, tipc_net_lock, to ensure that nothing is added
* or removed while code is accessing any of these structures.
* This layer must not be called from the two others while they
* hold any of their own locks.
* Neither must it itself do any upcalls to the other two before
* it has released tipc_net_lock and other protective locks.
*
 * Within the tipc_net_lock domain there are two sub-domains; 'node' and
 * Within the tipc_net_lock domain there are two sub-domains; 'node' and
* 'bearer', where local write operations are permitted,
* provided that those are protected by individual spin_locks
* per instance. Code holding tipc_net_lock(read) and a node spin_lock
* per instance. Code holding tipc_net_lock(read) and a node spin_lock
* is permitted to poke around in both the node itself and its
* subordinate links. I.e, it can update link counters and queues,
* change link state, send protocol messages, and alter the
* "active_links" array in the node; but it can _not_ remove a link
* subordinate links. I.e, it can update link counters and queues,
* change link state, send protocol messages, and alter the
* "active_links" array in the node; but it can _not_ remove a link
* or a node from the overall structure.
* Correspondingly, individual bearers may change status within a
 * tipc_net_lock(read), protected by an individual spin_lock per bearer
* Correspondingly, individual bearers may change status within a
 * tipc_net_lock(read), protected by an individual spin_lock per bearer
* instance, but it needs tipc_net_lock(write) to remove/add any bearers.
*
*
* 2: The transport level of the protocol.
* This consists of the structures port, (and its user level
* representations, such as user_port and tipc_sock), reference and
* tipc_user (port.c, reg.c, socket.c).
*
* 2: The transport level of the protocol.
* This consists of the structures port, (and its user level
* representations, such as user_port and tipc_sock), reference and
* tipc_user (port.c, reg.c, socket.c).
*
* This layer has four different locks:
* - The tipc_port spin_lock. This is protecting each port instance
* from parallel data access and removal. Since we can not place
* this lock in the port itself, it has been placed in the
* from parallel data access and removal. Since we can not place
* this lock in the port itself, it has been placed in the
* corresponding reference table entry, which has the same life
* cycle as the module. This entry is difficult to access from
* outside the TIPC core, however, so a pointer to the lock has
* been added in the port instance, -to be used for unlocking
* cycle as the module. This entry is difficult to access from
* outside the TIPC core, however, so a pointer to the lock has
* been added in the port instance, -to be used for unlocking
* only.
 * - A read/write lock to protect the reference table itself (reg.c).
* (Nobody is using read-only access to this, so it can just as
 * - A read/write lock to protect the reference table itself (reg.c).
* (Nobody is using read-only access to this, so it can just as
* well be changed to a spin_lock)
* - A spin lock to protect the registry of kernel/driver users (reg.c)
 * - A global spin_lock (tipc_port_lock), whose only task is to ensure
 * - A global spin_lock (tipc_port_lock), whose only task is to ensure
* consistency where more than one port is involved in an operation,
 * i.e., when a port is part of a linked list of ports.
* There are two such lists; 'port_list', which is used for management,
* and 'wait_list', which is used to queue ports during congestion.
*
*
* 3: The name table (name_table.c, name_distr.c, subscription.c)
* - There is one big read/write-lock (tipc_nametbl_lock) protecting the
* overall name table structure. Nothing must be added/removed to
* - There is one big read/write-lock (tipc_nametbl_lock) protecting the
* overall name table structure. Nothing must be added/removed to
* this structure without holding write access to it.
* - There is one local spin_lock per sub_sequence, which can be seen
* as a sub-domain to the tipc_nametbl_lock domain. It is used only
@ -118,7 +118,7 @@
DEFINE_RWLOCK(tipc_net_lock);
struct network tipc_net = { NULL };
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
}
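/*
 * Hedged sketch of the nesting rules spelled out in the policy comment for
 * the routing-hierarchy domain: tipc_net_lock is taken for reading first,
 * then the per-node spin_lock, and both are released in reverse order before
 * any upcall into the other two domains.  The "update the node" step is a
 * placeholder; this helper is illustrative only and not part of net.c.
 */
static void sketch_update_node(u32 addr)
{
	struct node *n_ptr;

	read_lock_bh(&tipc_net_lock);		/* freeze the zone/cluster/node layout */
	n_ptr = tipc_node_find(addr);
	if (n_ptr) {
		spin_lock_bh(&n_ptr->lock);	/* safe to touch this node and its links */
		/* ...update counters, queues, active_links[], send protocol msgs... */
		spin_unlock_bh(&n_ptr->lock);
	}
	read_unlock_bh(&tipc_net_lock);
}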
@ -224,7 +224,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
buf_discard(buf);
} else {
msg_dbg(msg, "NET>REJ>:");
tipc_reject_msg(buf, msg_destport(msg) ?
tipc_reject_msg(buf, msg_destport(msg) ?
TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
}
return;
@ -236,7 +236,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
if (in_scope(dnode, tipc_own_addr)) {
if (msg_isdata(msg)) {
if (msg_mcast(msg))
if (msg_mcast(msg))
tipc_port_recv_mcast(buf, NULL);
else if (msg_destport(msg))
tipc_port_recv_msg(buf);
@ -284,7 +284,7 @@ int tipc_net_start(void)
(res = tipc_bclink_init())) {
return res;
}
tipc_subscr_stop();
tipc_subscr_stop();
tipc_cfg_stop();
tipc_k_signal((Handler)tipc_subscr_start, 0);
tipc_k_signal((Handler)tipc_cfg_init, 0);
@ -298,12 +298,12 @@ void tipc_net_stop(void)
{
if (tipc_mode != TIPC_NET_MODE)
return;
write_lock_bh(&tipc_net_lock);
write_lock_bh(&tipc_net_lock);
tipc_bearer_stop();
tipc_mode = TIPC_NODE_MODE;
tipc_bclink_stop();
net_stop();
write_unlock_bh(&tipc_net_lock);
write_unlock_bh(&tipc_net_lock);
info("Left network mode \n");
}


@ -1,6 +1,6 @@
/*
* net/tipc/net.h: Include file for TIPC network routing code
*
*
* Copyright (c) 1995-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -43,7 +43,7 @@ struct _zone;
* struct network - TIPC network structure
* @zones: array of pointers to all zones within network
*/
struct network {
struct _zone **zones;
};


@ -1,6 +1,6 @@
/*
* net/tipc/netlink.c: TIPC configuration handling
*
*
* Copyright (c) 2005-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -63,15 +63,15 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
}
return 0;
return 0;
}
static struct genl_family family = {
.id = GENL_ID_GENERATE,
.name = TIPC_GENL_NAME,
.version = TIPC_GENL_VERSION,
.hdrsize = TIPC_GENL_HDRLEN,
.maxattr = 0,
.id = GENL_ID_GENERATE,
.name = TIPC_GENL_NAME,
.version = TIPC_GENL_VERSION,
.hdrsize = TIPC_GENL_HDRLEN,
.maxattr = 0,
};
static struct genl_ops ops = {
@ -93,7 +93,7 @@ int tipc_netlink_start(void)
if (genl_register_ops(&family, &ops))
goto err_unregister;
return 0;
return 0;
err_unregister:
genl_unregister_family(&family);


@ -1,6 +1,6 @@
/*
* net/tipc/node.c: TIPC node management routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
@ -58,7 +58,7 @@ struct node *tipc_node_create(u32 addr)
{
struct cluster *c_ptr;
struct node *n_ptr;
struct node **curr_node;
struct node **curr_node;
n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
if (!n_ptr) {
@ -74,16 +74,16 @@ struct node *tipc_node_create(u32 addr)
kfree(n_ptr);
return NULL;
}
n_ptr->addr = addr;
spin_lock_init(&n_ptr->lock);
spin_lock_init(&n_ptr->lock);
INIT_LIST_HEAD(&n_ptr->nsub);
n_ptr->owner = c_ptr;
tipc_cltr_attach_node(c_ptr, n_ptr);
n_ptr->last_router = -1;
/* Insert node into ordered list */
for (curr_node = &tipc_nodes; *curr_node;
for (curr_node = &tipc_nodes; *curr_node;
curr_node = &(*curr_node)->next) {
if (addr < (*curr_node)->addr) {
n_ptr->next = *curr_node;
@ -116,7 +116,7 @@ void tipc_node_delete(struct node *n_ptr)
/**
* tipc_node_link_up - handle addition of link
*
*
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
@ -128,19 +128,19 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
info("Established link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
if (!active[0]) {
dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
active[0] = active[1] = l_ptr;
node_established_contact(n_ptr);
return;
}
if (l_ptr->priority < active[0]->priority) {
if (l_ptr->priority < active[0]->priority) {
info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
tipc_link_send_duplicate(active[0], l_ptr);
if (l_ptr->priority == active[0]->priority) {
if (l_ptr->priority == active[0]->priority) {
active[0] = l_ptr;
return;
}
@ -160,17 +160,17 @@ static void node_select_active_links(struct node *n_ptr)
u32 i;
u32 highest_prio = 0;
active[0] = active[1] = NULL;
active[0] = active[1] = NULL;
for (i = 0; i < MAX_BEARERS; i++) {
struct link *l_ptr = n_ptr->links[i];
struct link *l_ptr = n_ptr->links[i];
if (!l_ptr || !tipc_link_is_up(l_ptr) ||
(l_ptr->priority < highest_prio))
continue;
if (l_ptr->priority > highest_prio) {
highest_prio = l_ptr->priority;
highest_prio = l_ptr->priority;
active[0] = active[1] = l_ptr;
} else {
active[1] = l_ptr;
@ -203,15 +203,15 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
active[1] = active[0];
if (active[0] == l_ptr)
node_select_active_links(n_ptr);
if (tipc_node_is_up(n_ptr))
if (tipc_node_is_up(n_ptr))
tipc_link_changeover(l_ptr);
else
else
node_lost_contact(n_ptr);
}
int tipc_node_has_active_links(struct node *n_ptr)
{
return (n_ptr &&
return (n_ptr &&
((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
}
@ -236,28 +236,28 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
if (!n_ptr)
n_ptr = tipc_node_create(l_ptr->addr);
if (n_ptr) {
if (n_ptr) {
u32 bearer_id = l_ptr->b_ptr->identity;
char addr_string[16];
if (n_ptr->link_cnt >= 2) {
if (n_ptr->link_cnt >= 2) {
char addr_string[16];
err("Attempt to create third link to %s\n",
err("Attempt to create third link to %s\n",
addr_string_fill(addr_string, n_ptr->addr));
return NULL;
}
return NULL;
}
if (!n_ptr->links[bearer_id]) {
n_ptr->links[bearer_id] = l_ptr;
tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
n_ptr->link_cnt++;
return n_ptr;
}
err("Attempt to establish second link on <%s> to %s \n",
l_ptr->b_ptr->publ.name,
if (!n_ptr->links[bearer_id]) {
n_ptr->links[bearer_id] = l_ptr;
tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
n_ptr->link_cnt++;
return n_ptr;
}
err("Attempt to establish second link on <%s> to %s \n",
l_ptr->b_ptr->publ.name,
addr_string_fill(addr_string, l_ptr->addr));
}
}
return NULL;
}
@ -272,17 +272,17 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
* Routing table management - five cases to handle:
*
* 1: A link towards a zone/cluster external node comes up.
* => Send a multicast message updating routing tables of all
* system nodes within own cluster that the new destination
* can be reached via this node.
* => Send a multicast message updating routing tables of all
* system nodes within own cluster that the new destination
* can be reached via this node.
* (node.establishedContact()=>cluster.multicastNewRoute())
*
* 2: A link towards a slave node comes up.
* => Send a multicast message updating routing tables of all
* system nodes within own cluster that the new destination
* can be reached via this node.
* => Send a multicast message updating routing tables of all
* system nodes within own cluster that the new destination
* can be reached via this node.
* (node.establishedContact()=>cluster.multicastNewRoute())
* => Send a message to the slave node about existence
* => Send a message to the slave node about existence
* of all system nodes within cluster:
* (node.establishedContact()=>cluster.sendLocalRoutes())
*
@ -292,13 +292,13 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
* nodes which can be reached via this node.
* (node.establishedContact()==>network.sendExternalRoutes())
* (node.establishedContact()==>network.sendSlaveRoutes())
* => Send messages to all directly connected slave nodes
* => Send messages to all directly connected slave nodes
* containing information about the existence of the new node
* (node.establishedContact()=>cluster.multicastNewRoute())
*
*
* 4: The link towards a zone/cluster external node or slave
* node goes down.
 * => Send a multicast message updating routing tables of all
 * => Send a multicast message updating routing tables of all
* nodes within cluster that the new destination can not any
* longer be reached via this node.
* (node.lostAllLinks()=>cluster.bcastLostRoute())
@ -308,7 +308,7 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
* routing tables. Note: This is a completely node
* local operation.
* (node.lostAllLinks()=>network.removeAsRouter())
* => Send messages to all directly connected slave nodes
* => Send messages to all directly connected slave nodes
* containing information about loss of the node
* (node.establishedContact()=>cluster.multicastLostRoute())
*
@ -319,12 +319,12 @@ static void node_established_contact(struct node *n_ptr)
struct cluster *c_ptr;
dbg("node_established_contact:-> %x\n", n_ptr->addr);
if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
}
/* Synchronize broadcast acks */
n_ptr->bclink.acked = tipc_bclink_get_last_sent();
/* Synchronize broadcast acks */
n_ptr->bclink.acked = tipc_bclink_get_last_sent();
if (is_slave(tipc_own_addr))
return;
@ -333,11 +333,11 @@ static void node_established_contact(struct node *n_ptr)
c_ptr = tipc_cltr_find(tipc_own_addr);
if (!c_ptr)
c_ptr = tipc_cltr_create(tipc_own_addr);
if (c_ptr)
tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
if (c_ptr)
tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
tipc_max_nodes);
return;
}
}
c_ptr = n_ptr->owner;
if (is_slave(n_ptr->addr)) {
@ -367,26 +367,26 @@ static void node_lost_contact(struct node *n_ptr)
char addr_string[16];
u32 i;
/* Clean up broadcast reception remains */
n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
while (n_ptr->bclink.deferred_head) {
struct sk_buff* buf = n_ptr->bclink.deferred_head;
n_ptr->bclink.deferred_head = buf->next;
buf_discard(buf);
}
if (n_ptr->bclink.defragm) {
buf_discard(n_ptr->bclink.defragm);
n_ptr->bclink.defragm = NULL;
}
if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
}
/* Clean up broadcast reception remains */
n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
while (n_ptr->bclink.deferred_head) {
struct sk_buff* buf = n_ptr->bclink.deferred_head;
n_ptr->bclink.deferred_head = buf->next;
buf_discard(buf);
}
if (n_ptr->bclink.defragm) {
buf_discard(n_ptr->bclink.defragm);
n_ptr->bclink.defragm = NULL;
}
if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
}
/* Update routing tables */
/* Update routing tables */
if (is_slave(tipc_own_addr)) {
tipc_net_remove_as_router(n_ptr->addr);
} else {
if (!in_own_cluster(n_ptr->addr)) {
if (!in_own_cluster(n_ptr->addr)) {
/* Case 4 (see above) */
c_ptr = tipc_cltr_find(tipc_own_addr);
tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
@ -399,7 +399,7 @@ static void node_lost_contact(struct node *n_ptr)
tipc_max_nodes);
} else {
if (n_ptr->bclink.supported) {
tipc_nmap_remove(&tipc_cltr_bcast_nodes,
tipc_nmap_remove(&tipc_cltr_bcast_nodes,
n_ptr->addr);
if (n_ptr->addr < tipc_own_addr)
tipc_own_tag--;
@ -414,13 +414,13 @@ static void node_lost_contact(struct node *n_ptr)
if (tipc_node_has_active_routes(n_ptr))
return;
info("Lost contact with %s\n",
info("Lost contact with %s\n",
addr_string_fill(addr_string, n_ptr->addr));
/* Abort link changeover */
for (i = 0; i < MAX_BEARERS; i++) {
struct link *l_ptr = n_ptr->links[i];
if (!l_ptr)
if (!l_ptr)
continue;
l_ptr->reset_checkpoint = l_ptr->next_in_no;
l_ptr->exp_msg_count = 0;
@ -429,7 +429,7 @@ static void node_lost_contact(struct node *n_ptr)
/* Notify subscribers */
list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
ns->node = NULL;
ns->node = NULL;
list_del_init(&ns->nodesub_list);
tipc_k_signal((Handler)ns->handle_node_down,
(unsigned long)ns->usr_handle);
@ -438,7 +438,7 @@ static void node_lost_contact(struct node *n_ptr)
/**
* tipc_node_select_next_hop - find the next-hop node for a message
*
*
 * Called when cluster local lookup has failed.
*/
@ -447,13 +447,13 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
struct node *n_ptr;
u32 router_addr;
if (!tipc_addr_domain_valid(addr))
return NULL;
if (!tipc_addr_domain_valid(addr))
return NULL;
/* Look for direct link to destination processor */
n_ptr = tipc_node_find(addr);
if (n_ptr && tipc_node_has_active_links(n_ptr))
return n_ptr;
return n_ptr;
/* Cluster local system nodes *must* have direct links */
if (!is_slave(addr) && in_own_cluster(addr))
@ -461,10 +461,10 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
/* Look for cluster local router with direct link to node */
router_addr = tipc_node_select_router(n_ptr, selector);
if (router_addr)
return tipc_node_select(router_addr, selector);
if (router_addr)
return tipc_node_select(router_addr, selector);
/* Slave nodes can only be accessed within own cluster via a
/* Slave nodes can only be accessed within own cluster via a
known router with direct link -- if no router was found, give up */
if (is_slave(addr))
return NULL;
@ -473,20 +473,20 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
n_ptr = tipc_net_select_remote_node(addr, selector);
if (n_ptr && tipc_node_has_active_links(n_ptr))
return n_ptr;
return n_ptr;
/* Last resort -- look for any router to anywhere in remote zone */
router_addr = tipc_net_select_router(addr, selector);
if (router_addr)
return tipc_node_select(router_addr, selector);
if (router_addr)
return tipc_node_select(router_addr, selector);
return NULL;
return NULL;
}
/**
* tipc_node_select_router - select router to reach specified node
*
* Uses a deterministic and fair algorithm for selecting router node.
*
* Uses a deterministic and fair algorithm for selecting router node.
*/
u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
@ -496,8 +496,8 @@ u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
u32 start;
u32 r;
if (!n_ptr)
return 0;
if (!n_ptr)
return 0;
if (n_ptr->last_router < 0)
return 0;
@ -531,10 +531,10 @@ void tipc_node_add_router(struct node *n_ptr, u32 router)
{
u32 r_num = tipc_node(router);
n_ptr->routers[r_num / 32] =
n_ptr->routers[r_num / 32] =
((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
n_ptr->last_router = tipc_max_nodes / 32;
while ((--n_ptr->last_router >= 0) &&
while ((--n_ptr->last_router >= 0) &&
!n_ptr->routers[n_ptr->last_router]);
}
@ -548,7 +548,7 @@ void tipc_node_remove_router(struct node *n_ptr, u32 router)
n_ptr->routers[r_num / 32] =
((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
n_ptr->last_router = tipc_max_nodes / 32;
while ((--n_ptr->last_router >= 0) &&
while ((--n_ptr->last_router >= 0) &&
!n_ptr->routers[n_ptr->last_router]);
if (!tipc_node_is_up(n_ptr))
@ -562,7 +562,7 @@ void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
tipc_printf(buf, "\n\n%s", str);
for (i = 0; i < MAX_BEARERS; i++) {
if (!n_ptr->links[i])
if (!n_ptr->links[i])
continue;
tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
}
@ -590,7 +590,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
u32 domain;
struct sk_buff *buf;
struct node *n_ptr;
struct tipc_node_info node_info;
struct tipc_node_info node_info;
u32 payload_size;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
@ -601,10 +601,10 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network address)");
if (!tipc_nodes)
return tipc_cfg_reply_none();
if (!tipc_nodes)
return tipc_cfg_reply_none();
/* For now, get space for all other nodes
/* For now, get space for all other nodes
(will need to modify this when slave nodes are supported) */
payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
@ -620,9 +620,9 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
if (!in_scope(domain, n_ptr->addr))
continue;
node_info.addr = htonl(n_ptr->addr);
node_info.up = htonl(tipc_node_is_up(n_ptr));
tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
node_info.addr = htonl(n_ptr->addr);
node_info.up = htonl(tipc_node_is_up(n_ptr));
tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
&node_info, sizeof(node_info));
}
@ -634,7 +634,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
u32 domain;
struct sk_buff *buf;
struct node *n_ptr;
struct tipc_link_info link_info;
struct tipc_link_info link_info;
u32 payload_size;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
@ -645,9 +645,9 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network address)");
if (tipc_mode != TIPC_NET_MODE)
return tipc_cfg_reply_none();
if (tipc_mode != TIPC_NET_MODE)
return tipc_cfg_reply_none();
/* Get space for all unicast links + multicast link */
payload_size = TLV_SPACE(sizeof(link_info)) *
@ -661,27 +661,27 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
/* Add TLV for broadcast link */
link_info.dest = htonl(tipc_own_addr & 0xfffff00);
link_info.up = htonl(1);
sprintf(link_info.str, tipc_bclink_name);
link_info.dest = htonl(tipc_own_addr & 0xfffff00);
link_info.up = htonl(1);
sprintf(link_info.str, tipc_bclink_name);
tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
/* Add TLVs for any other links in scope */
for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
u32 i;
u32 i;
if (!in_scope(domain, n_ptr->addr))
continue;
for (i = 0; i < MAX_BEARERS; i++) {
if (!n_ptr->links[i])
continue;
link_info.dest = htonl(n_ptr->addr);
link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
strcpy(link_info.str, n_ptr->links[i]->name);
tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
for (i = 0; i < MAX_BEARERS; i++) {
if (!n_ptr->links[i])
continue;
link_info.dest = htonl(n_ptr->addr);
link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
strcpy(link_info.str, n_ptr->links[i]->name);
tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));
}
}
}
return buf;


@ -1,6 +1,6 @@
/*
* net/tipc/node.h: Include file for TIPC node management routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -67,7 +67,7 @@
* @deferred_tail: newest OOS b'cast message received from node
* @defragm: list of partially reassembled b'cast message fragments from node
*/
struct node {
u32 addr;
spinlock_t lock;
@ -85,8 +85,8 @@ struct node {
int supported;
u32 acked;
u32 last_in;
u32 gap_after;
u32 gap_to;
u32 gap_after;
u32 gap_to;
u32 nack_sync;
struct sk_buff *deferred_head;
struct sk_buff *deferred_tail;


@ -1,6 +1,6 @@
/*
* net/tipc/node_subscr.c: TIPC "node down" subscription handling
*
*
* Copyright (c) 1995-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -44,14 +44,14 @@
* tipc_nodesub_subscribe - create "node down" subscription for specified node
*/
void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down)
{
if (addr == tipc_own_addr) {
node_sub->node = NULL;
return;
}
node_sub->node = tipc_node_find(addr);
if (!node_sub->node) {
warn("Node subscription rejected, unknown node 0x%x\n", addr);


@ -1,6 +1,6 @@
/*
* net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
*
*
* Copyright (c) 1995-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.


@ -1,6 +1,6 @@
/*
* net/tipc/port.c: TIPC port code
*
*
* Copyright (c) 1992-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -126,8 +126,8 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
TIPC_NODE_SCOPE, &dports);
/* Send message to destinations (duplicate it only if necessary) */
/* Send message to destinations (duplicate it only if necessary) */
if (ext_targets) {
if (dports.count != 0) {
@ -157,7 +157,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
/**
* tipc_port_recv_mcast - deliver multicast message to all destination ports
*
*
* If there is no port list, perform a lookup to create one
*/
@ -213,7 +213,7 @@ exit:
/**
* tipc_createport_raw - create a native TIPC port
*
*
* Returns local port reference
*/
@ -273,7 +273,7 @@ int tipc_deleteport(u32 ref)
tipc_withdraw(ref, 0, NULL);
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
if (!p_ptr)
return -EINVAL;
tipc_ref_discard(ref);
@ -302,7 +302,7 @@ int tipc_deleteport(u32 ref)
/**
* tipc_get_port() - return port associated with 'ref'
*
*
* Note: Port is not locked.
*/
@ -336,7 +336,7 @@ static int port_unreliable(struct port *p_ptr)
int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
{
struct port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -348,7 +348,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
{
struct port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -365,7 +365,7 @@ static int port_unreturnable(struct port *p_ptr)
int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
{
struct port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -377,7 +377,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
{
struct port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -386,19 +386,19 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
return TIPC_OK;
}
/*
* port_build_proto_msg(): build a port level protocol
* or a connection abortion message. Called with
/*
* port_build_proto_msg(): build a port level protocol
* or a connection abortion message. Called with
* tipc_port lock on.
*/
static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
u32 origport, u32 orignode,
u32 usr, u32 type, u32 err,
u32 usr, u32 type, u32 err,
u32 seqno, u32 ack)
{
struct sk_buff *buf;
struct tipc_msg *msg;
buf = buf_acquire(LONG_H_SIZE);
if (buf) {
msg = buf_msg(buf);
@ -461,7 +461,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
msg_set_orignode(rmsg, tipc_own_addr);
else
msg_set_orignode(rmsg, msg_destnode(msg));
msg_set_size(rmsg, data_sz + hdr_sz);
msg_set_size(rmsg, data_sz + hdr_sz);
msg_set_nametype(rmsg, msg_nametype(msg));
msg_set_nameinst(rmsg, msg_nameinst(msg));
memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
@ -492,7 +492,7 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
struct sk_buff *buf;
int res;
res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
!p_ptr->user_port, &buf);
if (!buf)
return res;
@ -523,7 +523,7 @@ static void port_timeout(unsigned long ref)
tipc_own_addr,
CONN_MANAGER,
CONN_PROBE,
TIPC_OK,
TIPC_OK,
port_out_seqno(p_ptr),
0);
port_incr_out_seqno(p_ptr);
@ -562,7 +562,7 @@ static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
port_peernode(p_ptr),
imp,
TIPC_CONN_MSG,
err,
err,
p_ptr->last_in_seqno + 1,
0);
}
@ -582,7 +582,7 @@ static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
tipc_own_addr,
imp,
TIPC_CONN_MSG,
err,
err,
port_out_seqno(p_ptr),
0);
}
@ -613,7 +613,7 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
}
}
if (msg_type(msg) == CONN_ACK) {
int wakeup = tipc_port_congested(p_ptr) &&
int wakeup = tipc_port_congested(p_ptr) &&
p_ptr->publ.congested &&
p_ptr->wakeup;
p_ptr->acked += msg_msgcnt(msg);
@ -630,8 +630,8 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
}
if (err) {
r_buf = port_build_proto_msg(msg_origport(msg),
msg_orignode(msg),
msg_destport(msg),
msg_orignode(msg),
msg_destport(msg),
tipc_own_addr,
DATA_HIGH,
TIPC_CONN_MSG,
@ -643,10 +643,10 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
/* All is fine */
if (msg_type(msg) == CONN_PROBE) {
r_buf = port_build_proto_msg(msg_origport(msg),
msg_orignode(msg),
msg_destport(msg),
tipc_own_addr,
r_buf = port_build_proto_msg(msg_origport(msg),
msg_orignode(msg),
msg_destport(msg),
tipc_own_addr,
CONN_MANAGER,
CONN_PROBE_REPLY,
TIPC_OK,
@ -665,39 +665,39 @@ exit:
static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
{
struct publication *publ;
struct publication *publ;
if (full_id)
tipc_printf(buf, "<%u.%u.%u:%u>:",
tipc_printf(buf, "<%u.%u.%u:%u>:",
tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
tipc_node(tipc_own_addr), p_ptr->publ.ref);
tipc_node(tipc_own_addr), p_ptr->publ.ref);
else
tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
if (p_ptr->publ.connected) {
u32 dport = port_peerport(p_ptr);
u32 destnode = port_peernode(p_ptr);
if (p_ptr->publ.connected) {
u32 dport = port_peerport(p_ptr);
u32 destnode = port_peernode(p_ptr);
tipc_printf(buf, " connected to <%u.%u.%u:%u>",
tipc_zone(destnode), tipc_cluster(destnode),
tipc_node(destnode), dport);
if (p_ptr->publ.conn_type != 0)
tipc_printf(buf, " via {%u,%u}",
p_ptr->publ.conn_type,
p_ptr->publ.conn_instance);
}
else if (p_ptr->publ.published) {
tipc_printf(buf, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
tipc_printf(buf, " connected to <%u.%u.%u:%u>",
tipc_zone(destnode), tipc_cluster(destnode),
tipc_node(destnode), dport);
if (p_ptr->publ.conn_type != 0)
tipc_printf(buf, " via {%u,%u}",
p_ptr->publ.conn_type,
p_ptr->publ.conn_instance);
}
else if (p_ptr->publ.published) {
tipc_printf(buf, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
if (publ->lower == publ->upper)
tipc_printf(buf, " {%u,%u}", publ->type,
publ->lower);
else
tipc_printf(buf, " {%u,%u,%u}", publ->type,
tipc_printf(buf, " {%u,%u,%u}", publ->type,
publ->lower, publ->upper);
}
}
tipc_printf(buf, "\n");
}
}
tipc_printf(buf, "\n");
}
#define MAX_PORT_QUERY 32768
@ -818,7 +818,7 @@ static void port_dispatcher_sigh(void *dummy)
struct sk_buff *next = buf->next;
struct tipc_msg *msg = buf_msg(buf);
u32 dref = msg_destport(msg);
message_type = msg_type(msg);
if (message_type > TIPC_DIRECT_MSG)
goto reject; /* Unsupported message type */
@ -838,7 +838,7 @@ static void port_dispatcher_sigh(void *dummy)
goto err;
switch (message_type) {
case TIPC_CONN_MSG:{
tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
u32 peer_port = port_peerport(p_ptr);
@ -856,9 +856,9 @@ static void port_dispatcher_sigh(void *dummy)
goto reject;
if (unlikely(!cb))
goto reject;
if (unlikely(++p_ptr->publ.conn_unacked >=
if (unlikely(++p_ptr->publ.conn_unacked >=
TIPC_FLOW_CONTROL_WIN))
tipc_acknowledge(dref,
tipc_acknowledge(dref,
p_ptr->publ.conn_unacked);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
@ -874,7 +874,7 @@ static void port_dispatcher_sigh(void *dummy)
if (unlikely(!cb))
goto reject;
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_importance(msg),
&orig);
break;
@ -895,7 +895,7 @@ static void port_dispatcher_sigh(void *dummy)
dseq.upper = (message_type == TIPC_NAMED_MSG)
? dseq.lower : msg_nameupper(msg);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_importance(msg),
&orig, &dseq);
break;
@ -907,9 +907,9 @@ static void port_dispatcher_sigh(void *dummy)
continue;
err:
switch (message_type) {
case TIPC_CONN_MSG:{
tipc_conn_shutdown_event cb =
tipc_conn_shutdown_event cb =
up_ptr->conn_err_cb;
u32 peer_port = port_peerport(p_ptr);
u32 peer_node = port_peernode(p_ptr);
@ -940,7 +940,7 @@ err:
}
case TIPC_MCAST_MSG:
case TIPC_NAMED_MSG:{
tipc_named_msg_err_event cb =
tipc_named_msg_err_event cb =
up_ptr->named_err_cb;
spin_unlock_bh(p_ptr->publ.lock);
@ -951,7 +951,7 @@ err:
dseq.upper = (message_type == TIPC_NAMED_MSG)
? dseq.lower : msg_nameupper(msg);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_errcode(msg), &dseq);
break;
}
@ -986,9 +986,9 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
return TIPC_OK;
}
/*
/*
* Wake up port after congestion: Called with port locked,
*
*
*/
static void port_wakeup_sh(unsigned long ref)
@ -1033,7 +1033,7 @@ void tipc_acknowledge(u32 ref, u32 ack)
tipc_own_addr,
CONN_MANAGER,
CONN_ACK,
TIPC_OK,
TIPC_OK,
port_out_seqno(p_ptr),
ack);
}
@ -1046,20 +1046,20 @@ void tipc_acknowledge(u32 ref, u32 ack)
* registry if non-zero user_ref.
*/
int tipc_createport(u32 user_ref,
void *usr_handle,
unsigned int importance,
tipc_msg_err_event error_cb,
tipc_named_msg_err_event named_error_cb,
tipc_conn_shutdown_event conn_error_cb,
tipc_msg_event msg_cb,
tipc_named_msg_event named_msg_cb,
tipc_conn_msg_event conn_msg_cb,
int tipc_createport(u32 user_ref,
void *usr_handle,
unsigned int importance,
tipc_msg_err_event error_cb,
tipc_named_msg_err_event named_error_cb,
tipc_conn_shutdown_event conn_error_cb,
tipc_msg_event msg_cb,
tipc_named_msg_event named_msg_cb,
tipc_conn_msg_event conn_msg_cb,
tipc_continue_event continue_event_cb,/* May be zero */
u32 *portref)
{
struct user_port *up_ptr;
struct port *p_ptr;
struct port *p_ptr;
u32 ref;
up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
@ -1088,7 +1088,7 @@ int tipc_createport(u32 user_ref,
INIT_LIST_HEAD(&up_ptr->uport_list);
tipc_reg_add_port(up_ptr);
*portref = p_ptr->publ.ref;
dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
tipc_port_unlock(p_ptr);
return TIPC_OK;
}
@ -1103,7 +1103,7 @@ int tipc_ownidentity(u32 ref, struct tipc_portid *id)
int tipc_portimportance(u32 ref, unsigned int *importance)
{
struct port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -1172,19 +1172,19 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
struct publication *publ;
struct publication *tpubl;
int res = -EINVAL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
tipc_nametbl_withdraw(publ->type, publ->lower,
tipc_nametbl_withdraw(publ->type, publ->lower,
publ->ref, publ->key);
}
res = TIPC_OK;
} else {
list_for_each_entry_safe(publ, tpubl,
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
if (publ->scope != scope)
continue;
@ -1194,7 +1194,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
continue;
if (publ->upper != seq->upper)
break;
tipc_nametbl_withdraw(publ->type, publ->lower,
tipc_nametbl_withdraw(publ->type, publ->lower,
publ->ref, publ->key);
res = TIPC_OK;
break;
@ -1292,7 +1292,7 @@ int tipc_shutdown(u32 ref)
tipc_own_addr,
imp,
TIPC_CONN_MSG,
TIPC_CONN_SHUTDOWN,
TIPC_CONN_SHUTDOWN,
port_out_seqno(p_ptr),
0);
}
@ -1304,7 +1304,7 @@ int tipc_shutdown(u32 ref)
int tipc_isconnected(u32 ref, int *isconnected)
{
struct port *p_ptr;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -1317,7 +1317,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
{
struct port *p_ptr;
int res;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
@ -1348,7 +1348,7 @@ int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
{
struct sk_buff *buf;
int res;
res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
MAX_MSG_SIZE, !sender->user_port, &buf);
if (likely(buf))
@ -1394,7 +1394,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
return -ELINKCONG;
}
/**
/**
* tipc_send_buf - send message buffer on connection
*/
@ -1406,7 +1406,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
u32 hsz;
u32 sz;
u32 res;
p_ptr = tipc_port_deref(ref);
if (!p_ptr || !p_ptr->publ.connected)
return -EINVAL;
@ -1447,12 +1447,12 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
* tipc_forward2name - forward message sections to port name
*/
int tipc_forward2name(u32 ref,
struct tipc_name const *name,
int tipc_forward2name(u32 ref,
struct tipc_name const *name,
u32 domain,
u32 num_sect,
u32 num_sect,
struct iovec const *msg_sect,
struct tipc_portid const *orig,
struct tipc_portid const *orig,
unsigned int importance)
{
struct port *p_ptr;
@ -1483,7 +1483,7 @@ int tipc_forward2name(u32 ref,
p_ptr->sent++;
if (likely(destnode == tipc_own_addr))
return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
destnode);
if (likely(res != -ELINKCONG))
return res;
@ -1493,7 +1493,7 @@ int tipc_forward2name(u32 ref,
}
return -ELINKCONG;
}
return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
TIPC_ERR_NO_NAME);
}
@ -1501,10 +1501,10 @@ int tipc_forward2name(u32 ref,
* tipc_send2name - send message sections to port name
*/
int tipc_send2name(u32 ref,
int tipc_send2name(u32 ref,
struct tipc_name const *name,
unsigned int domain,
unsigned int num_sect,
unsigned int domain,
unsigned int num_sect,
struct iovec const *msg_sect)
{
struct tipc_portid orig;
@ -1515,7 +1515,7 @@ int tipc_send2name(u32 ref,
TIPC_PORT_IMPORTANCE);
}
/**
/**
* tipc_forward_buf2name - forward message buffer to port name
*/
@ -1571,14 +1571,14 @@ int tipc_forward_buf2name(u32 ref,
return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
/**
/**
* tipc_send_buf2name - send message buffer to port name
*/
int tipc_send_buf2name(u32 ref,
struct tipc_name const *dest,
int tipc_send_buf2name(u32 ref,
struct tipc_name const *dest,
u32 domain,
struct sk_buff *buf,
struct sk_buff *buf,
unsigned int dsz)
{
struct tipc_portid orig;
@ -1589,15 +1589,15 @@ int tipc_send_buf2name(u32 ref,
TIPC_PORT_IMPORTANCE);
}
/**
/**
* tipc_forward2port - forward message sections to port identity
*/
int tipc_forward2port(u32 ref,
struct tipc_portid const *dest,
unsigned int num_sect,
unsigned int num_sect,
struct iovec const *msg_sect,
struct tipc_portid const *orig,
struct tipc_portid const *orig,
unsigned int importance)
{
struct port *p_ptr;
@ -1630,24 +1630,24 @@ int tipc_forward2port(u32 ref,
return -ELINKCONG;
}
/**
* tipc_send2port - send message sections to port identity
/**
* tipc_send2port - send message sections to port identity
*/
int tipc_send2port(u32 ref,
int tipc_send2port(u32 ref,
struct tipc_portid const *dest,
unsigned int num_sect,
unsigned int num_sect,
struct iovec const *msg_sect)
{
struct tipc_portid orig;
orig.ref = ref;
orig.node = tipc_own_addr;
return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
TIPC_PORT_IMPORTANCE);
}
/**
/**
* tipc_forward_buf2port - forward message buffer to port identity
*/
int tipc_forward_buf2port(u32 ref,
@ -1692,20 +1692,20 @@ int tipc_forward_buf2port(u32 ref,
return -ELINKCONG;
}
/**
/**
* tipc_send_buf2port - send message buffer to port identity
*/
int tipc_send_buf2port(u32 ref,
int tipc_send_buf2port(u32 ref,
struct tipc_portid const *dest,
struct sk_buff *buf,
struct sk_buff *buf,
unsigned int dsz)
{
struct tipc_portid orig;
orig.ref = ref;
orig.node = tipc_own_addr;
return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
TIPC_PORT_IMPORTANCE);
}
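
tipc_forward2name() above makes a three-way routing decision once the name has been translated: a destination on this node is handed straight to tipc_port_recv_sections(), any other node goes out through the link layer, and a name that cannot be resolved is bounced back with TIPC_ERR_NO_NAME. The standalone sketch below models only that decision; the enum values, helper name, and the "0 means unresolved" convention are invented for the illustration and are not taken from this commit.

/*
 * Standalone model of the routing decision visible in tipc_forward2name().
 */
#include <stdio.h>
#include <stdint.h>

enum model_outcome {
	DELIVER_LOCAL,   /* destination is this node: receive path directly */
	SEND_ON_LINK,    /* destination is another node: hand to link layer */
	REJECT_NO_NAME   /* name did not resolve: bounce the message back   */
};

static enum model_outcome model_route(uint32_t dest_node, uint32_t own_node)
{
	if (!dest_node)
		return REJECT_NO_NAME;
	if (dest_node == own_node)
		return DELIVER_LOCAL;
	return SEND_ON_LINK;
}

int main(void)
{
	uint32_t own = 0x01001001;	/* arbitrary example node address */

	printf("%d %d %d\n",
	       model_route(own, own),		/* 0: local delivery     */
	       model_route(0x01001002, own),	/* 1: off-node, use link */
	       model_route(0, own));		/* 2: unresolved, reject */
	return 0;
}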

View file

@ -1,6 +1,6 @@
/*
* net/tipc/port.h: Include file for TIPC port code
*
*
* Copyright (c) 1994-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -52,17 +52,17 @@
* <various callback routines>
* @uport_list: adjacent user ports in list of ports held by user
*/
struct user_port {
u32 user_ref;
void *usr_handle;
void *usr_handle;
u32 ref;
tipc_msg_err_event err_cb;
tipc_named_msg_err_event named_err_cb;
tipc_conn_shutdown_event conn_err_cb;
tipc_msg_event msg_cb;
tipc_named_msg_event named_msg_cb;
tipc_conn_msg_event conn_msg_cb;
tipc_msg_err_event err_cb;
tipc_named_msg_err_event named_err_cb;
tipc_conn_shutdown_event conn_err_cb;
tipc_msg_event msg_cb;
tipc_named_msg_event named_msg_cb;
tipc_conn_msg_event conn_msg_cb;
tipc_continue_event continue_event_cb;
struct list_head uport_list;
};
@ -113,7 +113,7 @@ struct port {
extern spinlock_t tipc_port_list_lock;
struct port_list;
int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
struct iovec const *msg_sect);
int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
@ -133,9 +133,9 @@ static inline struct port *tipc_port_lock(u32 ref)
return (struct port *)tipc_ref_lock(ref);
}
/**
/**
* tipc_port_unlock - unlock a port instance
*
*
* Can use pointer instead of tipc_ref_unlock() since port is already locked.
*/
@ -164,7 +164,7 @@ static inline int tipc_port_congested(struct port *p_ptr)
return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
}
/**
/**
* tipc_port_recv_msg - receive message from lower layer and deliver to port user
*/
@ -175,7 +175,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
u32 destport = msg_destport(msg);
u32 dsz = msg_data_sz(msg);
u32 err;
/* forward unresolved named message */
if (unlikely(!destport)) {
tipc_net_route_msg(buf);
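
The tipc_port_congested() helper above treats a port as congested once the number of sent-but-unacknowledged messages reaches twice the flow-control window. A minimal userspace model of that test follows; the window value is an assumption made only for this example, the real constant is defined elsewhere in the TIPC headers.

/*
 * Userspace model of the tipc_port_congested() window test.
 */
#include <stdio.h>

#define MODEL_FLOW_CONTROL_WIN 50	/* assumed value, illustration only */

struct model_port {
	unsigned int sent;	/* messages sent on the connection   */
	unsigned int acked;	/* messages acknowledged by the peer */
};

static int model_port_congested(const struct model_port *p)
{
	return (p->sent - p->acked) >= (MODEL_FLOW_CONTROL_WIN * 2);
}

int main(void)
{
	struct model_port quiet = { .sent = 20,  .acked = 15 };
	struct model_port busy  = { .sent = 120, .acked = 10 };

	printf("quiet congested: %d\n", model_port_congested(&quiet));	/* 0 */
	printf("busy  congested: %d\n", model_port_congested(&busy));	/* 1 */
	return 0;
}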

View file

@ -1,6 +1,6 @@
/*
* net/tipc/ref.c: TIPC object registry code
*
*
* Copyright (c) 1991-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -50,11 +50,11 @@
* Object reference table consists of 2**N entries.
*
* A used entry has object ptr != 0, reference == XXXX|own index
* (XXXX changes each time entry is acquired)
* (XXXX changes each time entry is acquired)
* A free entry has object ptr == 0, reference == YYYY|next free index
* (YYYY is one more than last used XXXX)
*
* Free list is initially chained from entry (2**N)-1 to entry 1.
* Free list is initially chained from entry (2**N)-1 to entry 1.
* Entry 0 is not used to allow index 0 to indicate the end of the free list.
*
* Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
@ -113,9 +113,9 @@ void tipc_ref_table_stop(void)
/**
* tipc_ref_acquire - create reference to an object
*
*
* Return a unique reference value which can be translated back to the pointer
* 'object' at a later time. Also, pass back a pointer to the lock protecting
* 'object' at a later time. Also, pass back a pointer to the lock protecting
* the object, but without locking it.
*/
@ -141,15 +141,15 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
index = tipc_ref_table.first_free;
entry = &(tipc_ref_table.entries[index]);
index_mask = tipc_ref_table.index_mask;
/* take lock in case a previous user of entry still holds it */
spin_lock_bh(&entry->lock);
/* take lock in case a previous user of entry still holds it */
spin_lock_bh(&entry->lock);
next_plus_upper = entry->data.next_plus_upper;
tipc_ref_table.first_free = next_plus_upper & index_mask;
reference = (next_plus_upper & ~index_mask) + index;
entry->data.reference = reference;
entry->object = object;
if (lock != 0)
*lock = &entry->lock;
if (lock != 0)
*lock = &entry->lock;
spin_unlock_bh(&entry->lock);
}
write_unlock_bh(&ref_table_lock);
@ -158,7 +158,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
/**
* tipc_ref_discard - invalidate references to an object
*
*
* Disallow future references to an object and free up the entry for re-use.
* Note: The entry's spin_lock may still be busy after discard
*/
@ -166,7 +166,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
void tipc_ref_discard(u32 ref)
{
struct reference *entry;
u32 index;
u32 index;
u32 index_mask;
if (!ref) {
@ -198,7 +198,7 @@ void tipc_ref_discard(u32 ref)
tipc_ref_table.first_free = index;
else
/* next_plus_upper is always XXXX|0--0 for last free entry */
tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
|= index;
tipc_ref_table.last_free = index;
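
The comment block above describes how a reference packs the table index into its low bits and a changing upper value into the rest, so a stale reference to a recycled slot stops matching; tipc_ref_deref() in ref.h below relies on exactly that full-value comparison. A small userspace model of the encoding, with an illustrative table size:

/*
 * Userspace model of the reference layout described in ref.c.
 */
#include <stdio.h>
#include <stdint.h>

#define MODEL_INDEX_BITS 4				/* 2**4 = 16 slots, example only */
#define MODEL_INDEX_MASK ((1u << MODEL_INDEX_BITS) - 1)

static uint32_t model_make_ref(uint32_t upper, uint32_t index)
{
	return (upper << MODEL_INDEX_BITS) | index;
}

static int model_ref_valid(uint32_t ref, uint32_t stored_ref)
{
	/* same style of check as tipc_ref_deref(): the whole value must match */
	return ref == stored_ref;
}

int main(void)
{
	uint32_t slot = 3;
	uint32_t old_ref = model_make_ref(7, slot);	/* handed out earlier */
	uint32_t new_ref = model_make_ref(8, slot);	/* slot later reused  */

	printf("slot index: %u\n", (unsigned)(old_ref & MODEL_INDEX_MASK));	   /* 3 */
	printf("stale ref still valid: %d\n", model_ref_valid(old_ref, new_ref)); /* 0 */
	return 0;
}

Keeping entry 0 out of the free chain, as the comment notes, is what allows a next index of 0 to mark the end of the free list.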

View file

@ -1,6 +1,6 @@
/*
* net/tipc/ref.h: Include file for TIPC object registry code
*
*
* Copyright (c) 1991-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -43,7 +43,7 @@
* @lock: spinlock controlling access to object
* @data: reference value associated with object (or link to next unused entry)
*/
struct reference {
void *object;
spinlock_t lock;
@ -96,7 +96,7 @@ static inline void *tipc_ref_lock(u32 ref)
}
/**
* tipc_ref_unlock - unlock referenced object
* tipc_ref_unlock - unlock referenced object
*/
static inline void tipc_ref_unlock(u32 ref)
@ -119,7 +119,7 @@ static inline void tipc_ref_unlock(u32 ref)
static inline void *tipc_ref_deref(u32 ref)
{
if (likely(tipc_ref_table.entries)) {
struct reference *r =
struct reference *r =
&tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
if (likely(r->data.reference == ref))

Diff for this file not shown because of its large size. Load diff

View file

@ -1,6 +1,6 @@
/*
* net/tipc/subscr.c: TIPC subscription service
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -49,10 +49,10 @@
* @port_ref: object reference to port used to communicate with subscriber
* @swap: indicates if subscriber uses opposite endianness in its messages
*/
struct subscriber {
u32 ref;
spinlock_t *lock;
spinlock_t *lock;
struct list_head subscriber_list;
struct list_head subscription_list;
u32 port_ref;
@ -82,7 +82,7 @@ static struct top_srv topsrv = { 0 };
* htohl - convert value to endianness used by destination
* @in: value to convert
* @swap: non-zero if endianness must be reversed
*
*
* Returns converted value
*/
@ -97,11 +97,11 @@ static u32 htohl(u32 in, int swap)
* subscr_send_event - send a message containing a tipc_event to the subscriber
*/
static void subscr_send_event(struct subscription *sub,
u32 found_lower,
static void subscr_send_event(struct subscription *sub,
u32 found_lower,
u32 found_upper,
u32 event,
u32 port_ref,
u32 event,
u32 port_ref,
u32 node)
{
struct iovec msg_sect;
@ -123,8 +123,8 @@ static void subscr_send_event(struct subscription *sub,
* Returns 1 if there is overlap, otherwise 0.
*/
int tipc_subscr_overlap(struct subscription *sub,
u32 found_lower,
int tipc_subscr_overlap(struct subscription *sub,
u32 found_lower,
u32 found_upper)
{
@ -139,15 +139,15 @@ int tipc_subscr_overlap(struct subscription *sub,
/**
* tipc_subscr_report_overlap - issue event if there is subscription overlap
*
*
* Protected by nameseq.lock in name_table.c
*/
void tipc_subscr_report_overlap(struct subscription *sub,
u32 found_lower,
void tipc_subscr_report_overlap(struct subscription *sub,
u32 found_lower,
u32 found_upper,
u32 event,
u32 port_ref,
u32 event,
u32 port_ref,
u32 node,
int must)
{
@ -189,11 +189,11 @@ static void subscr_timeout(struct subscription *sub)
/* Notify subscriber of timeout, then unlink subscription */
subscr_send_event(sub,
sub->evt.s.seq.lower,
subscr_send_event(sub,
sub->evt.s.seq.lower,
sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT,
0,
TIPC_SUBSCR_TIMEOUT,
0,
0);
list_del(&sub->subscription_list);
@ -221,11 +221,11 @@ static void subscr_del(struct subscription *sub)
/**
* subscr_terminate - terminate communication with a subscriber
*
*
* Called with subscriber locked. Routine must temporarily release this lock
* to enable subscription timeout routine(s) to finish without deadlocking;
* to enable subscription timeout routine(s) to finish without deadlocking;
* the lock is then reclaimed to allow caller to release it upon return.
* (This should work even in the unlikely event some other thread creates
* (This should work even in the unlikely event some other thread creates
* a new object reference in the interim that uses this lock; this routine will
* simply wait for it to be released, then claim it.)
*/
@ -241,7 +241,7 @@ static void subscr_terminate(struct subscriber *subscriber)
spin_unlock_bh(subscriber->lock);
/* Destroy any existing subscriptions for subscriber */
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if (sub->timeout != TIPC_WAIT_FOREVER) {
@ -315,7 +315,7 @@ static void subscr_cancel(struct tipc_subscr *s,
/**
* subscr_subscribe - create subscription for subscriber
*
*
* Called with subscriber locked
*/
@ -431,7 +431,7 @@ static void subscr_conn_msg_event(void *usr_handle,
subscr_terminate(subscriber);
else
subscr_subscribe((struct tipc_subscr *)data, subscriber);
spin_unlock_bh(subscriber_lock);
}
@ -444,7 +444,7 @@ static void subscr_named_msg_event(void *usr_handle,
struct sk_buff **buf,
const unchar *data,
u32 size,
u32 importance,
u32 importance,
struct tipc_portid const *orig,
struct tipc_name_seq const *dest)
{
@ -534,22 +534,22 @@ int tipc_subscr_start(void)
return res;
}
res = tipc_createport(topsrv.user_ref,
NULL,
TIPC_CRITICAL_IMPORTANCE,
NULL,
NULL,
NULL,
NULL,
subscr_named_msg_event,
NULL,
NULL,
&topsrv.setup_port);
if (res)
res = tipc_createport(topsrv.user_ref,
NULL,
TIPC_CRITICAL_IMPORTANCE,
NULL,
NULL,
NULL,
NULL,
subscr_named_msg_event,
NULL,
NULL,
&topsrv.setup_port);
if (res)
goto failed;
res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
if (res)
res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
if (res)
goto failed;
spin_unlock_bh(&topsrv.lock);
@ -571,7 +571,7 @@ void tipc_subscr_stop(void)
if (topsrv.user_ref) {
tipc_deleteport(topsrv.setup_port);
list_for_each_entry_safe(subscriber, subscriber_temp,
list_for_each_entry_safe(subscriber, subscriber_temp,
&topsrv.subscriber_list,
subscriber_list) {
tipc_ref_lock(subscriber->ref);
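
tipc_subscr_overlap() above is documented to return 1 only when the published name range intersects the range the subscription covers. The clamp-and-compare routine below is one straightforward way to meet that contract in a standalone model; it is not a copy of the kernel body.

/*
 * Standalone sketch of the range-overlap test described for tipc_subscr_overlap().
 */
#include <stdio.h>
#include <stdint.h>

struct model_seq {
	uint32_t lower;
	uint32_t upper;
};

static int model_overlap(const struct model_seq *sub,
			 uint32_t found_lower, uint32_t found_upper)
{
	/* clamp the published range to the subscribed range */
	if (found_lower < sub->lower)
		found_lower = sub->lower;
	if (found_upper > sub->upper)
		found_upper = sub->upper;
	return found_lower <= found_upper;	/* non-empty intersection? */
}

int main(void)
{
	struct model_seq sub = { .lower = 100, .upper = 199 };

	printf("%d\n", model_overlap(&sub, 150, 300));	/* 1: ranges intersect */
	printf("%d\n", model_overlap(&sub, 200, 300));	/* 0: disjoint         */
	return 0;
}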

View file

@ -1,6 +1,6 @@
/*
* net/tipc/subscr.h: Include file for TIPC subscription service
*
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -48,7 +48,7 @@
* @timer_ref: reference to timer governing subscription duration (may be NULL)
* @owner: pointer to subscriber object associated with this subscription
*/
struct subscription {
struct tipc_name_seq seq;
u32 timeout;
@ -60,15 +60,15 @@ struct subscription {
struct subscriber *owner;
};
int tipc_subscr_overlap(struct subscription * sub,
u32 found_lower,
int tipc_subscr_overlap(struct subscription * sub,
u32 found_lower,
u32 found_upper);
void tipc_subscr_report_overlap(struct subscription * sub,
u32 found_lower,
void tipc_subscr_report_overlap(struct subscription * sub,
u32 found_lower,
u32 found_upper,
u32 event,
u32 port_ref,
u32 event,
u32 port_ref,
u32 node,
int must_report);

View file

@ -1,6 +1,6 @@
/*
* net/tipc/user_reg.c: TIPC user registry code
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
* All rights reserved.
@ -40,7 +40,7 @@
/*
* TIPC user registry keeps track of users of the tipc_port interface.
*
* The registry utilizes an array of "TIPC user" entries;
* The registry utilizes an array of "TIPC user" entries;
* a user's ID is the index of their associated array entry.
* Array entry 0 is not used, so userid 0 is not valid;
* TIPC sometimes uses this value to denote an anonymous user.
@ -51,7 +51,7 @@
* struct tipc_user - registered TIPC user info
* @next: index of next free registry entry (or -1 for an allocated entry)
* @callback: ptr to routine to call when TIPC mode changes (NULL if none)
* @usr_handle: user-defined value passed to callback routine
* @usr_handle: user-defined value passed to callback routine
* @ports: list of user ports owned by the user
*/
@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(reg_lock);
/**
* reg_init - create TIPC user registry (but don't activate it)
*
*
* If registry has been pre-initialized it is left "as is".
* NOTE: This routine may be called when TIPC is inactive.
*/
@ -79,7 +79,7 @@ static DEFINE_SPINLOCK(reg_lock);
static int reg_init(void)
{
u32 i;
spin_lock_bh(&reg_lock);
if (!users) {
users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
@ -137,7 +137,7 @@ int tipc_reg_start(void)
*/
void tipc_reg_stop(void)
{
{
int id;
if (!users)
@ -174,14 +174,14 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
user_ptr = &users[next_free_user];
*userid = next_free_user;
next_free_user = user_ptr->next;
user_ptr->next = -1;
user_ptr->next = -1;
spin_unlock_bh(&reg_lock);
user_ptr->callback = cb;
user_ptr->usr_handle = usr_handle;
INIT_LIST_HEAD(&user_ptr->ports);
atomic_inc(&tipc_user_count);
if (cb && (tipc_mode != TIPC_NOT_RUNNING))
tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
return TIPC_OK;
@ -207,16 +207,16 @@ void tipc_detach(u32 userid)
}
user_ptr = &users[userid];
user_ptr->callback = NULL;
user_ptr->callback = NULL;
INIT_LIST_HEAD(&ports_temp);
list_splice(&user_ptr->ports, &ports_temp);
list_splice(&user_ptr->ports, &ports_temp);
user_ptr->next = next_free_user;
next_free_user = userid;
spin_unlock_bh(&reg_lock);
atomic_dec(&tipc_user_count);
list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
tipc_deleteport(up_ptr->ref);
}
}
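
The registry description and the tipc_attach()/tipc_detach() hunks above show user IDs doubling as array indexes: slot 0 is reserved for the anonymous user, free slots are chained through their next fields, an allocated slot gets next = -1, and a released slot is pushed back onto the head of the free list. A self-contained model of that allocator follows; the table size and chaining order are arbitrary choices made for the model.

/*
 * Self-contained model of the user-registry free list.
 */
#include <stdio.h>

#define MODEL_MAX_USERS 8		/* illustrative size only */

struct model_user {
	int next;			/* next free index, or -1 when allocated */
};

static struct model_user users[MODEL_MAX_USERS + 1];
static int next_free_user;

static void model_reg_init(void)
{
	int i;

	for (i = 1; i < MODEL_MAX_USERS; i++)
		users[i].next = i + 1;
	users[MODEL_MAX_USERS].next = 0;	/* 0 ends the chain: never a valid ID */
	next_free_user = 1;
}

static int model_attach(void)
{
	int id = next_free_user;

	if (!id)
		return -1;			/* registry full */
	next_free_user = users[id].next;
	users[id].next = -1;
	return id;
}

static void model_detach(int id)
{
	users[id].next = next_free_user;
	next_free_user = id;
}

int main(void)
{
	int a, b, c;

	model_reg_init();
	a = model_attach();			/* gets ID 1 */
	b = model_attach();			/* gets ID 2 */
	model_detach(a);
	c = model_attach();			/* reuses ID 1 */
	printf("%d %d %d\n", a, b, c);		/* prints: 1 2 1 */
	return 0;
}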

View file

@ -1,6 +1,6 @@
/*
* net/tipc/user_reg.h: Include file for TIPC user registry code
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.

View file

@ -1,6 +1,6 @@
/*
* net/tipc/zone.c: TIPC zone management routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* All rights reserved.
@ -92,7 +92,7 @@ void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
if (z_ptr->clusters[c_num]) {
tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
router);
}
}

View file

@ -1,6 +1,6 @@
/*
* net/tipc/zone.h: Include file for TIPC zone management routines
*
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
@ -47,7 +47,7 @@
* @clusters: array of pointers to all clusters within zone
* @links: number of (unicast) links to zone
*/
struct _zone {
u32 addr;
struct cluster *clusters[2]; /* currently limited to just 1 cluster */