tipc: Add missing locks in broadcast link statistics accumulation

Ensures that all attempts to update broadcast link statistics are done
only while holding the lock that protects the link's main data structures,
to prevent interference by simultaneous updates caused by messages
arriving on other interfaces.

Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
This commit is contained in:
Allan Stephens 2011-10-26 16:13:35 -04:00 committed by Paul Gortmaker
parent 0232c5a566
commit b98158e3b3
1 changed file with 11 additions and 0 deletions

View File

@@ -520,6 +520,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 	if (likely(seqno == next_in)) {
 receive:
+		spin_lock_bh(&bc_lock);
 		bcl->stats.recv_info++;
 		node->bclink.last_in++;
 		bclink_set_gap(node);
@@ -527,7 +528,9 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 			bclink_send_ack(node);
 			bcl->stats.sent_acks++;
 		}
 		if (likely(msg_isdata(msg))) {
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			if (likely(msg_mcast(msg)))
 				tipc_port_recv_mcast(buf, NULL);
@@ -536,6 +539,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 		} else if (msg_user(msg) == MSG_BUNDLER) {
 			bcl->stats.recv_bundles++;
 			bcl->stats.recv_bundled += msg_msgcnt(msg);
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_link_recv_bundle(buf);
 		} else if (msg_user(msg) == MSG_FRAGMENTER) {
@@ -543,12 +547,15 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
 				bcl->stats.recv_fragmented++;
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_net_route_msg(buf);
 		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_named_recv(buf);
 		} else {
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			buf_discard(buf);
 		}
@@ -601,11 +608,15 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 	} else
 		deferred = 0;
+	spin_lock_bh(&bc_lock);
 	if (deferred)
 		bcl->stats.deferred_recv++;
 	else
 		bcl->stats.duplicates++;
+	spin_unlock_bh(&bc_lock);
 unlock:
 	tipc_node_unlock(node);
 exit: