bond_alb: don't disable softirq under bond_alb_xmit

No need to disable softirqs under bond_alb_xmit(),
which already runs with softirqs disabled.

Changes:
1. split tlb_clear_slave() into a BH-locking tlb_clear_slave() wrapper and
   an unlocked __tlb_clear_slave() helper for callers that already hold the
   tx hash table lock

2. provide both BH and non-BH variants of the hash table lock helpers
   (a short sketch of the pattern follows below):
   _lock_rx_hashtbl_bh/_unlock_rx_hashtbl_bh
   _lock_rx_hashtbl/_unlock_rx_hashtbl
   _lock_tx_hashtbl_bh/_unlock_tx_hashtbl_bh
   _lock_tx_hashtbl/_unlock_tx_hashtbl
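
As an illustration only (example_lock and the example_* helpers below are
hypothetical names, not part of bond_alb.c), the pattern being introduced
looks roughly like this: a _bh lock pair for process-context callers, and a
plain pair for paths such as ndo_start_xmit where softirqs are already
disabled.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for tx_hashtbl_lock */

static void example_touch_table(void)
{
	/* ... modify the shared hash table here ... */
}

/* Process context: a softirq could interrupt us and try to take the same
 * lock, so block softirqs while we hold it.
 */
static void example_update_from_process(void)
{
	spin_lock_bh(&example_lock);
	example_touch_table();
	spin_unlock_bh(&example_lock);
}

/* Transmit path: bond_alb_xmit() already runs with softirqs disabled,
 * so the plain (cheaper) lock variant is sufficient.
 */
static void example_update_from_xmit(void)
{
	spin_lock(&example_lock);
	example_touch_table();
	spin_unlock(&example_lock);
}

The gain is simply that the hot transmit path no longer disables and
re-enables softirqs redundantly for every packet.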

Signed-off-by: Maxim Uvarov <maxim.uvarov@oracle.com>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Maxim Uvarov on 2012-01-09 12:01:37 +00:00, committed by David S. Miller
Parent d6c25beba3
Commit f515e6b770
1 changed file with 76 additions and 36 deletions


@@ -99,16 +99,26 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
 /*********************** tlb specific functions ***************************/
 
-static inline void _lock_tx_hashtbl(struct bonding *bond)
+static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
 {
 	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
-static inline void _unlock_tx_hashtbl(struct bonding *bond)
+static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
 {
 	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
+static inline void _lock_tx_hashtbl(struct bonding *bond)
+{
+	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+static inline void _unlock_tx_hashtbl(struct bonding *bond)
+{
+	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
 /* Caller must hold tx_hashtbl lock */
 static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
 {
@@ -129,14 +139,13 @@ static inline void tlb_init_slave(struct slave *slave)
 	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
 }
 
-/* Caller must hold bond lock for read */
-static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
+/* Caller must hold bond lock for read, BH disabled */
+static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
+			      int save_load)
 {
 	struct tlb_client_info *tx_hash_table;
 	u32 index;
 
-	_lock_tx_hashtbl(bond);
-
 	/* clear slave from tx_hashtbl */
 	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
@@ -151,8 +160,15 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
 		}
 		tlb_init_slave(slave);
 	}
+}
 
-	_unlock_tx_hashtbl(bond);
+/* Caller must hold bond lock for read */
+static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
+			    int save_load)
+{
+	_lock_tx_hashtbl_bh(bond);
+	__tlb_clear_slave(bond, slave, save_load);
+	_unlock_tx_hashtbl_bh(bond);
 }
 
 /* Must be called before starting the monitor timer */
@@ -169,7 +185,7 @@ static int tlb_initialize(struct bonding *bond)
 		       bond->dev->name);
 		return -1;
 	}
-	_lock_tx_hashtbl(bond);
+	_lock_tx_hashtbl_bh(bond);
 
 	bond_info->tx_hashtbl = new_hashtbl;
@@ -177,7 +193,7 @@ static int tlb_initialize(struct bonding *bond)
 		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
 	}
 
-	_unlock_tx_hashtbl(bond);
+	_unlock_tx_hashtbl_bh(bond);
 
 	return 0;
 }
@@ -187,12 +203,12 @@ static void tlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-	_lock_tx_hashtbl(bond);
+	_lock_tx_hashtbl_bh(bond);
 
 	kfree(bond_info->tx_hashtbl);
 	bond_info->tx_hashtbl = NULL;
 
-	_unlock_tx_hashtbl(bond);
+	_unlock_tx_hashtbl_bh(bond);
 }
 
 static long long compute_gap(struct slave *slave)
@@ -226,15 +242,13 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 	return least_loaded;
 }
 
 /* Caller must hold bond lock for read */
-static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
+static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
+					  u32 skb_len)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct tlb_client_info *hash_table;
 	struct slave *assigned_slave;
 
-	_lock_tx_hashtbl(bond);
-
 	hash_table = bond_info->tx_hashtbl;
 	assigned_slave = hash_table[hash_index].tx_slave;
 	if (!assigned_slave) {
@@ -263,22 +277,46 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3
 		hash_table[hash_index].tx_bytes += skb_len;
 	}
 
-	_unlock_tx_hashtbl(bond);
-
 	return assigned_slave;
 }
 
+/* Caller must hold bond lock for read */
+static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
+					u32 skb_len)
+{
+	struct slave *tx_slave;
+	/*
+	 * We don't need to disable softirq here, because
+	 * tlb_choose_channel() is only called by bond_alb_xmit()
+	 * which already has softirq disabled.
+	 */
+	_lock_tx_hashtbl(bond);
+	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
+	_unlock_tx_hashtbl(bond);
+	return tx_slave;
+}
+
 /*********************** rlb specific functions ***************************/
-static inline void _lock_rx_hashtbl(struct bonding *bond)
+static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
 {
 	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
-static inline void _unlock_rx_hashtbl(struct bonding *bond)
+static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
 {
 	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
+static inline void _lock_rx_hashtbl(struct bonding *bond)
+{
+	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+static inline void _unlock_rx_hashtbl(struct bonding *bond)
+{
+	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
 /* when an ARP REPLY is received from a client update its info
  * in the rx_hashtbl
  */
@@ -288,7 +326,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
 	client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -303,7 +341,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 		bond_info->rx_ntt = 1;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
@@ -401,7 +439,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 	u32 index, next_index;
 
 	/* clear slave from rx_hashtbl */
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	rx_hash_table = bond_info->rx_hashtbl;
 	index = bond_info->rx_hashtbl_head;
@@ -432,7 +470,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 		}
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 
 	write_lock_bh(&bond->curr_slave_lock);
@@ -489,7 +527,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = bond_info->rx_hashtbl_head;
 	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -507,7 +545,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	 */
 	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* The slave was assigned a new mac address - update the clients */
@@ -518,7 +556,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
 	int ntt = 0;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = bond_info->rx_hashtbl_head;
 	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -538,7 +576,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
 		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* mark all clients using src_ip to be updated */
@@ -709,7 +747,7 @@ static void rlb_rebalance(struct bonding *bond)
 	int ntt;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	ntt = 0;
 	hash_index = bond_info->rx_hashtbl_head;
@@ -727,7 +765,7 @@ static void rlb_rebalance(struct bonding *bond)
 	if (ntt) {
 		bond_info->rx_ntt = 1;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* Caller must hold rx_hashtbl lock */
@@ -751,7 +789,7 @@ static int rlb_initialize(struct bonding *bond)
 		       bond->dev->name);
 		return -1;
 	}
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	bond_info->rx_hashtbl = new_hashtbl;
@@ -761,7 +799,7 @@ static int rlb_initialize(struct bonding *bond)
 		rlb_init_table_entry(bond_info->rx_hashtbl + i);
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 
 	/* register to receive ARPs */
 	bond->recv_probe = rlb_arp_recv;
@@ -773,13 +811,13 @@ static void rlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	kfree(bond_info->rx_hashtbl);
 	bond_info->rx_hashtbl = NULL;
 	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -787,7 +825,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	u32 curr_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	curr_index = bond_info->rx_hashtbl_head;
 	while (curr_index != RLB_NULL_INDEX) {
@@ -812,7 +850,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 		curr_index = next_index;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /*********************** tlb/rlb shared functions *********************/
@@ -1320,7 +1358,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
 	} else {
 		if (tx_slave) {
-			tlb_clear_slave(bond, tx_slave, 0);
+			_lock_tx_hashtbl(bond);
+			__tlb_clear_slave(bond, tx_slave, 0);
+			_unlock_tx_hashtbl(bond);
 		}
 	}