carl9170: fix HT peer BA session corruption

This patch adds an alternative tx status path
for BlockAck Requests, as the hardware doesn't
recognize that a BlockAck Request is usually
acked with a BlockAck and not a legacy ACK.
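
For reference, the two frame types involved can be told apart from the
frame_control field alone with the existing ieee80211.h helpers; a minimal
illustrative sketch (not part of this patch, the classify_back_frame() name
is made up) could look like this:

#include <linux/ieee80211.h>

/* Illustration only: distinguish the BlockAck Request we transmit
 * from the BlockAck the peer answers with, using the same
 * frame_control helpers the patch below relies on.
 */
static const char *classify_back_frame(const struct ieee80211_bar *bar)
{
        if (ieee80211_is_back_req(bar->frame_control))
                return "BlockAck Request (transmitted by the originator)";

        if (ieee80211_is_back(bar->frame_control))
                return "BlockAck (the expected answer, not a legacy ACK)";

        return "some other frame";
}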

Without this patch, the stack would constantly
resend old and stale BARs. So, depending on the
receiver stack, this could lead to:

 - "stuck" ba sessions and package loss, as the
   stale BAR would reset the sequence each time.

 - lots of reorder releases.

 - ...

Reported-by: Sean Patrick Santos <quantheory@gmail.com>
Reported-by: Mikołaj Kuligowski <mikolaj.q@wp.pl>
Reported-by: Per-Erik Westerberg <per-erik.westerberg@bredband.net>
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Christian Lamparter 2012-07-07 21:13:59 +02:00 committed by John W. Linville
Parent 4519a74338
Commit c9122c0d63
5 changed files: 132 additions and 0 deletions

drivers/net/wireless/ath/carl9170/carl9170.h

@@ -289,6 +289,7 @@ struct ar9170 {
unsigned int mem_block_size;
unsigned int rx_size;
unsigned int tx_seq_table;
bool ba_filter;
} fw;
/* interface configuration combinations */
@@ -425,6 +426,10 @@ struct ar9170 {
struct sk_buff *rx_failover;
int rx_failover_missing;
/* FIFO for collecting outstanding BlockAckRequest */
struct list_head bar_list[__AR9170_NUM_TXQ];
spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
#ifdef CONFIG_CARL9170_WPC
struct {
bool pbc_state;
@@ -468,6 +473,12 @@ enum carl9170_ps_off_override_reasons {
PS_OFF_BCN = BIT(1),
};
struct carl9170_bar_list_entry {
struct list_head list;
struct rcu_head head;
struct sk_buff *skb;
};
struct carl9170_ba_stats {
u8 ampdu_len;
u8 ampdu_ack_len;

drivers/net/wireless/ath/carl9170/fw.c

@@ -307,6 +307,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
if (SUPP(CARL9170FW_RX_BA_FILTER))
ar->fw.ba_filter = true;
if_comb_types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT);

drivers/net/wireless/ath/carl9170/main.c

@@ -949,6 +949,9 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
u32 rx_filter = 0;
if (!ar->fw.ba_filter)
rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
rx_filter |= CARL9170_RX_FILTER_BAD;
@@ -1753,6 +1756,9 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ar->hw->queues; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
INIT_LIST_HEAD(&ar->bar_list[i]);
spin_lock_init(&ar->bar_list_lock[i]);
}
INIT_WORK(&ar->ps_work, carl9170_ps_work);
INIT_WORK(&ar->ping_work, carl9170_ping_work);

drivers/net/wireless/ath/carl9170/rx.c

@@ -576,6 +576,53 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
}
}
static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
{
struct ieee80211_bar *bar = (void *) data;
struct carl9170_bar_list_entry *entry;
unsigned int queue;
if (likely(!ieee80211_is_back(bar->frame_control)))
return;
if (len <= sizeof(*bar) + FCS_LEN)
return;
queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
rcu_read_lock();
list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
struct sk_buff *entry_skb = entry->skb;
struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
struct ieee80211_bar *entry_bar = (void *)super->frame_data;
#define TID_CHECK(a, b) ( \
((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
tx_info->flags |= IEEE80211_TX_STAT_ACK;
spin_lock_bh(&ar->bar_list_lock[queue]);
list_del_rcu(&entry->list);
spin_unlock_bh(&ar->bar_list_lock[queue]);
kfree_rcu(entry, head);
break;
}
}
rcu_read_unlock();
#undef TID_CHECK
}
static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
{
__le16 fc;
@@ -738,6 +785,8 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
carl9170_ps_beacon(ar, buf, mpdu_len);
carl9170_ba_check(ar, buf, mpdu_len);
skb = carl9170_rx_copy_data(buf, mpdu_len);
if (!skb)
goto drop;

drivers/net/wireless/ath/carl9170/tx.c

@@ -436,6 +436,45 @@ out_rcu:
rcu_read_unlock();
}
static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_bar *bar = (void *) super->frame_data;
/*
 * Unlike all other frames, the status report for BARs does
 * not directly come from the hardware as it is incapable of
 * matching a BA to a previously sent BAR.
 * Instead the RX-path will scan for incoming BAs and set the
 * IEEE80211_TX_STAT_ACK if it sees one that was likely
 * caused by a BAR from us.
 */
if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
struct carl9170_bar_list_entry *entry;
int queue = skb_get_queue_mapping(skb);
rcu_read_lock();
list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
if (entry->skb == skb) {
spin_lock_bh(&ar->bar_list_lock[queue]);
list_del_rcu(&entry->list);
spin_unlock_bh(&ar->bar_list_lock[queue]);
kfree_rcu(entry, head);
goto out;
}
}
WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
queue, bar->ra, bar->ta, bar->control,
bar->start_seq_num);
out:
rcu_read_unlock();
}
}
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
const bool success)
{
@@ -445,6 +484,8 @@ void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
txinfo = IEEE80211_SKB_CB(skb);
carl9170_tx_bar_status(ar, skb, txinfo);
if (success)
txinfo->flags |= IEEE80211_TX_STAT_ACK;
else
@@ -1265,6 +1306,26 @@ out_rcu:
return false;
}
static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_bar *bar = (void *) super->frame_data;
if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
skb->len >= sizeof(struct ieee80211_bar)) {
struct carl9170_bar_list_entry *entry;
unsigned int queue = skb_get_queue_mapping(skb);
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!WARN_ON_ONCE(!entry)) {
entry->skb = skb;
spin_lock_bh(&ar->bar_list_lock[queue]);
list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
spin_unlock_bh(&ar->bar_list_lock[queue]);
}
}
}
static void carl9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1287,6 +1348,8 @@ static void carl9170_tx(struct ar9170 *ar)
if (unlikely(carl9170_tx_ps_drop(ar, skb)))
continue;
carl9170_bar_check(ar, skb);
atomic_inc(&ar->tx_total_pending);
q = __carl9170_get_queue(ar, i);
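
The per-queue bar_list handling in this patch follows the standard
RCU-protected list pattern: readers walk the list under rcu_read_lock(),
writers serialize add/del with the per-queue spinlock, and entries are freed
with kfree_rcu(). A minimal self-contained sketch of that pattern
(illustrative names only, not driver code) would be:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustration only: the same add/lookup/free scheme used for
 * ar->bar_list[], with hypothetical names (tracked_frame, frame_list).
 */
struct tracked_frame {
        struct list_head list;
        struct rcu_head head;
        struct sk_buff *skb;
};

static LIST_HEAD(frame_list);
static DEFINE_SPINLOCK(frame_list_lock);

static void track_frame(struct sk_buff *skb)
{
        struct tracked_frame *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);

        if (!entry)
                return;

        entry->skb = skb;
        spin_lock_bh(&frame_list_lock);
        list_add_tail_rcu(&entry->list, &frame_list);
        spin_unlock_bh(&frame_list_lock);
}

static void untrack_frame(struct sk_buff *skb)
{
        struct tracked_frame *entry;

        rcu_read_lock();
        list_for_each_entry_rcu(entry, &frame_list, list) {
                if (entry->skb != skb)
                        continue;

                /* writers still serialize against each other */
                spin_lock_bh(&frame_list_lock);
                list_del_rcu(&entry->list);
                spin_unlock_bh(&frame_list_lock);
                kfree_rcu(entry, head);
                break;
        }
        rcu_read_unlock();
}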