Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for v5.16. Major changes:

ath11k

* fix QCA6390 A-MSDU handling (CVE-2020-24588)

wcn36xx

* enable hardware scan offload for 5GHz band

* add missing 5GHz channels 136 and 144
Kalle Valo 2021-10-28 16:23:52 +03:00
Parents a427aca0a9 c1b9ca365d
Commit d7333a8ec8
21 changed files: 403 additions and 100 deletions

View file

@ -2690,9 +2690,16 @@ static int ath10k_core_copy_target_iram(struct ath10k *ar)
int i, ret;
u32 len, remaining_len;
hw_mem = ath10k_coredump_get_mem_layout(ar);
/* copy target iram feature must work also when
* ATH10K_FW_CRASH_DUMP_RAM_DATA is disabled, so
* use _ath10k_coredump_get_mem_layout() to accomplish that
*/
hw_mem = _ath10k_coredump_get_mem_layout(ar);
if (!hw_mem)
return -ENOMEM;
/* if CONFIG_DEV_COREDUMP is disabled we get NULL, then
* just silently disable the feature by doing nothing
*/
return 0;
for (i = 0; i < hw_mem->region_table.size; i++) {
tmp = &hw_mem->region_table.regions[i];
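
A consolidated sketch of the control flow this change produces (assembled from the hunks in this commit, details abridged):

	/* exported helper: honours the coredump mask */
	const struct ath10k_hw_mem_layout *
	ath10k_coredump_get_mem_layout(struct ath10k *ar)
	{
		if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
			return NULL;
		return _ath10k_coredump_get_mem_layout(ar);
	}

	/* IRAM copy bypasses the mask via the underscore variant */
	hw_mem = _ath10k_coredump_get_mem_layout(ar);
	if (!hw_mem)
		return 0;	/* CONFIG_DEV_COREDUMP disabled: silently skip */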

View file

@ -1447,11 +1447,17 @@ static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
{
int i;
if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
return NULL;
return _ath10k_coredump_get_mem_layout(ar);
}
EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar)
{
int i;
if (WARN_ON(ar->target_version == 0))
return NULL;
@ -1464,7 +1470,6 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k
return NULL;
}
EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{

View file

@ -176,6 +176,7 @@ int ath10k_coredump_register(struct ath10k *ar);
void ath10k_coredump_unregister(struct ath10k *ar);
void ath10k_coredump_destroy(struct ath10k *ar);
const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar);
const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
#else /* CONFIG_DEV_COREDUMP */
@ -214,6 +215,12 @@ ath10k_coredump_get_mem_layout(struct ath10k *ar)
return NULL;
}
static inline const struct ath10k_hw_mem_layout *
_ath10k_coredump_get_mem_layout(struct ath10k *ar)
{
return NULL;
}
#endif /* CONFIG_DEV_COREDUMP */
#endif /* _COREDUMP_H_ */

View file

@ -5583,7 +5583,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
GFP_KERNEL);
arvif->beacon_paddr = (dma_addr_t)arvif->beacon_buf;
/* Using a kernel pointer in place of a dma_addr_t
* token can lead to undefined behavior if that
* makes it into cache management functions. Use a
* known-invalid address token instead, which
* avoids the warning and makes it easier to catch
* bugs if it does end up getting used.
*/
arvif->beacon_paddr = DMA_MAPPING_ERROR;
} else {
arvif->beacon_buf =
dma_alloc_coherent(ar->dev,
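
The sentinel also cooperates with the generic DMA API; a minimal illustration of how later misuse would be caught (hypothetical check, not part of this patch):

	if (dma_mapping_error(ar->dev, arvif->beacon_paddr))
		ath10k_warn(ar, "beacon buffer has no real DMA mapping (HL target)\n");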

View file

@ -525,7 +525,7 @@ static int ath10k_usb_submit_ctrl_in(struct ath10k *ar,
req,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, buf,
size, 2 * HZ);
size, 2000);
if (ret < 0) {
ath10k_warn(ar, "Failed to read usb control message: %d\n",
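
The timeout argument of usb_control_msg() is in milliseconds, not jiffies, so the old "2 * HZ" only meant two seconds on HZ=1000 kernels; a small illustration of the difference (values only, not driver code):

	unsigned int timeout_ms      = 2000;    /* always 2 s, HZ-independent */
	unsigned int old_timeout_arg = 2 * HZ;  /* 200 on a HZ=100 kernel, read as 200 ms */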
@ -853,6 +853,11 @@ static int ath10k_usb_setup_pipe_resources(struct ath10k *ar,
le16_to_cpu(endpoint->wMaxPacketSize),
endpoint->bInterval);
}
/* Ignore broken descriptors. */
if (usb_endpoint_maxp(endpoint) == 0)
continue;
urbcount = 0;
pipe_num =

View file

@ -81,6 +81,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@ -129,6 +130,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
},
{
.name = "qca6390 hw2.0",
@ -176,6 +178,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
},
{
.name = "qcn9074 hw1.0",
@ -223,6 +226,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.fix_l1ss = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
},
{
.name = "wcn6855 hw2.0",
@ -270,6 +274,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.fix_l1ss = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
},
};

View file

@ -739,6 +739,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int budget)
{
struct napi_struct *napi = &irq_grp->napi;
const struct ath11k_hw_hal_params *hal_params;
int grp_id = irq_grp->grp_id;
int work_done = 0;
int i = 0, j;
@ -821,8 +822,9 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
hal_params = ab->hw_params.hal_params;
ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
HAL_RX_BUF_RBM_SW3_BM);
hal_params->rx_buf_rbm);
}
}
}

View file

@ -499,7 +499,7 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
rx_ring->bufs_max = num_entries;
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
HAL_RX_BUF_RBM_SW3_BM);
ar->ab->hw_params.hal_params->rx_buf_rbm);
return 0;
}
@ -2756,7 +2756,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
HAL_RX_BUF_RBM_SW3_BM);
ab->hw_params.hal_params->rx_buf_rbm);
}
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
@ -2949,6 +2949,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
struct ath11k *ar;
const struct ath11k_hw_hal_params *hal_params;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct hal_srng *srng;
@ -3019,8 +3020,9 @@ move_next:
&buf_id);
if (!skb) {
hal_params = ab->hw_params.hal_params;
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
HAL_RX_BUF_RBM_SW3_BM);
hal_params->rx_buf_rbm);
num_buffs_reaped++;
break;
}
@ -3030,7 +3032,8 @@ move_next:
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
cookie, HAL_RX_BUF_RBM_SW3_BM);
cookie,
ab->hw_params.hal_params->rx_buf_rbm);
ath11k_hal_srng_src_get_next_entry(ab, srng);
num_buffs_reaped++;
}
@ -3419,7 +3422,8 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);
ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
ab->hw_params.hal_params->rx_buf_rbm);
/* Fill mpdu details into reo entrance ring */
srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
@ -3796,7 +3800,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
rbm != ab->hw_params.hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath11k_dp_rx_link_desc_return(ab, desc,
@ -3852,7 +3856,7 @@ exit:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
HAL_RX_BUF_RBM_SW3_BM);
ab->hw_params.hal_params->rx_buf_rbm);
}
return tot_n_bufs_reaped;
@ -4148,7 +4152,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
HAL_RX_BUF_RBM_SW3_BM);
ab->hw_params.hal_params->rx_buf_rbm);
}
rcu_read_lock();
@ -4257,7 +4261,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
if (num_buf_freed)
ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
HAL_RX_BUF_RBM_SW3_BM);
ab->hw_params.hal_params->rx_buf_rbm);
return budget - quota;
}
@ -4976,6 +4980,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
void *mon_dst_srng;
u32 ppdu_id;
@ -5039,16 +5044,18 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
if (rx_bufs_used) {
rx_mon_stats->dest_ppdu_done++;
hal_params = ar->ab->hw_params.hal_params;
if (ar->ab->hw_params.rxdma1_enable)
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rxdma_mon_buf_ring,
rx_bufs_used,
HAL_RX_BUF_RBM_SW3_BM);
hal_params->rx_buf_rbm);
else
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rx_refill_buf_ring,
rx_bufs_used,
HAL_RX_BUF_RBM_SW3_BM);
hal_params->rx_buf_rbm);
}
}

View file

@ -356,6 +356,7 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
struct hal_wbm_release_ring *wbm_desc = desc;
enum hal_wbm_rel_desc_type type;
enum hal_wbm_rel_src_module rel_src;
enum hal_rx_buf_return_buf_manager ret_buf_mgr;
type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
wbm_desc->info0);
@ -371,8 +372,9 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
rel_src != HAL_WBM_REL_SRC_MODULE_REO)
return -EINVAL;
if (FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1) != HAL_RX_BUF_RBM_SW3_BM) {
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
if (ret_buf_mgr != ab->hw_params.hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}

View file

@ -7,10 +7,11 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "hw.h"
#include "core.h"
#include "ce.h"
#include "hif.h"
#include "hal.h"
#include "hw.h"
/* Map from pdev index to hw mac index */
static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
@ -2124,3 +2125,11 @@ const struct ath11k_hw_regs wcn6855_regs = {
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
};

View file

@ -6,6 +6,7 @@
#ifndef ATH11K_HW_H
#define ATH11K_HW_H
#include "hal.h"
#include "wmi.h"
/* Target configuration defines */
@ -119,6 +120,10 @@ struct ath11k_hw_ring_mask {
u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
struct ath11k_hw_hal_params {
enum hal_rx_buf_return_buf_manager rx_buf_rbm;
};
struct ath11k_hw_params {
const char *name;
u16 hw_rev;
@ -170,6 +175,7 @@ struct ath11k_hw_params {
u32 hal_desc_sz;
bool fix_l1ss;
u8 max_tx_ring;
const struct ath11k_hw_hal_params *hal_params;
};
struct ath11k_hw_ops {
@ -223,6 +229,9 @@ extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
int pdev_idx)

View file

@ -340,6 +340,11 @@ static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
le16_to_cpu(endpoint->wMaxPacketSize),
endpoint->bInterval);
}
/* Ignore broken descriptors. */
if (usb_endpoint_maxp(endpoint) == 0)
continue;
urbcount = 0;
pipe_num =
@ -907,7 +912,7 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
req,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, buf,
size, 2 * HZ);
size, 2000);
if (ret < 0) {
ath6kl_warn("Failed to read usb control message: %d\n", ret);

View file

@ -403,8 +403,21 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(ctl->skb);
if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/* Keep frame until TX status comes */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
} else {
/* Wait for the TX ack indication or timeout... */
spin_lock(&wcn->dxe_lock);
if (WARN_ON(wcn->tx_ack_skb))
ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
spin_unlock(&wcn->dxe_lock);
}
/* do not free, ownership transferred to mac80211 status cb */
} else {
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
@ -426,7 +439,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
int int_src, int_reason;
bool transmitted = false;
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
@ -466,7 +478,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
transmitted = true;
}
}
@ -479,7 +490,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
WCN36XX_DXE_0_INT_CLR,
WCN36XX_INT_MASK_CHAN_TX_L);
if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK ) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ERR_CLR,
@ -507,26 +517,9 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
transmitted = true;
}
}
spin_lock(&wcn->dxe_lock);
if (wcn->tx_ack_skb && transmitted) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(wcn->tx_ack_skb);
/* TX complete, no need to wait for 802.11 ack indication */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS &&
info->flags & IEEE80211_TX_CTL_NO_ACK) {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
del_timer(&wcn->tx_ack_timer);
ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
wcn->tx_ack_skb = NULL;
ieee80211_wake_queues(wcn->hw);
}
}
spin_unlock(&wcn->dxe_lock);
return IRQ_HANDLED;
}
@ -613,6 +606,10 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
dxe = ctl->desc;
while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
/* do not read until we own DMA descriptor */
dma_rmb();
/* read/modify DMA descriptor */
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
@ -623,9 +620,15 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
wcn36xx_rx_skb(wcn, skb);
} /* else keep old skb not submitted and use it for rx DMA */
}
/* else keep old skb not submitted and reuse it for rx DMA
* (dropping the packet that it contained)
*/
/* flush descriptor changes before re-marking as valid */
dma_wmb();
dxe->ctrl = ctrl;
ctl = ctl->next;
dxe = ctl->desc;
}
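
The barrier pair added above follows the usual descriptor-ownership handshake; a condensed sketch of the loop as it now reads (names from the hunk, refill and error handling abridged):

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		dma_rmb();		/* VLD cleared, descriptor is ours: order reads after the check */
		skb = ctl->skb;		/* safe to read what the hardware wrote */
		/* ... hand skb up, map a fresh buffer into the descriptor ... */
		dma_wmb();		/* publish descriptor updates before returning ownership */
		dxe->ctrl = ctrl;	/* re-mark valid: hardware owns it from here */
		ctl = ctl->next;
		dxe = ctl->desc;
	}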

View file

@ -359,6 +359,8 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ = 208,
WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP = 209,
WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
@ -1353,6 +1355,36 @@ struct wcn36xx_hal_stop_scan_offload_rsp_msg {
u32 status;
} __packed;
#define WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK 0x000000ff
#define WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK 0x0000ff00
#define WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK 0x00ff0000
#define WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK 0xff000000
#define WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK 0x000000ff
#define WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE BIT(7)
#define WCN36XX_HAL_CHAN_INFO_FLAG_DFS BIT(10)
#define WCN36XX_HAL_CHAN_INFO_FLAG_HT BIT(11)
#define WCN36XX_HAL_CHAN_INFO_FLAG_VHT BIT(12)
#define WCN36XX_HAL_CHAN_INFO_PHY_11A 0
#define WCN36XX_HAL_CHAN_INFO_PHY_11BG 1
#define WCN36XX_HAL_DEFAULT_ANT_GAIN 6
#define WCN36XX_HAL_DEFAULT_MIN_POWER 6
struct wcn36xx_hal_channel_param {
u32 mhz;
u32 band_center_freq1;
u32 band_center_freq2;
u32 channel_info;
u32 reg_info_1;
u32 reg_info_2;
} __packed;
struct wcn36xx_hal_update_channel_list_req_msg {
struct wcn36xx_hal_msg_header header;
u8 num_channel;
struct wcn36xx_hal_channel_param channels[80];
} __packed;
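
As a worked example of the packing these masks imply (illustrative values): a channel with min power 6, max power 30, regulatory power 30, class id 0 and antenna gain 6 encodes as

	reg_info_1 = (0 << 24) | (30 << 16) | (30 << 8) | 6 = 0x001e1e06
	reg_info_2 = 6                                       = 0x00000006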
enum wcn36xx_hal_rate_index {
HW_RATE_INDEX_1MBPS = 0x82,
HW_RATE_INDEX_2MBPS = 0x84,

View file

@ -85,7 +85,9 @@ static struct ieee80211_channel wcn_5ghz_channels[] = {
CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5640, 128, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5680, 136, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5720, 144, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5765, 153, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
@ -135,7 +137,9 @@ static struct ieee80211_supported_band wcn_band_2ghz = {
.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_LSIG_TXOP_PROT,
IEEE80211_HT_CAP_LSIG_TXOP_PROT |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_SUP_WIDTH_20_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@ -613,15 +617,6 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
}
}
/* FIXME: Only enable bmps support when encryption is enabled.
* For any reasons, when connected to open/no-security BSS,
* the wcn36xx controller in bmps mode does not forward
* 'wake-up' beacons despite AP sends DTIM with station AID.
* It could be due to a firmware issue or to the way driver
* configure the station.
*/
if (vif->type == NL80211_IFTYPE_STATION)
vif_priv->allow_bmps = true;
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
@ -659,19 +654,19 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct wcn36xx *wcn = hw->priv;
int i;
if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* fallback to mac80211 software scan */
return 1;
}
/* For unknown reason, the hardware offloaded scan only works with
* 2.4Ghz channels, fallback to software scan in other cases.
/* Firmware scan offload is limited to 48 channels, fallback to
* software driven scanning otherwise.
*/
for (i = 0; i < hw_req->req.n_channels; i++) {
if (hw_req->req.channels[i]->band != NL80211_BAND_2GHZ)
return 1;
if (hw_req->req.n_channels > 48) {
wcn36xx_warn("Offload scan aborted, n_channels=%u",
hw_req->req.n_channels);
return 1;
}
mutex_lock(&wcn->scan_lock);
@ -685,6 +680,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&wcn->scan_lock);
wcn36xx_smd_update_channel_list(wcn, &hw_req->req);
return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
@ -922,7 +918,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif->addr,
bss_conf->aid);
vif_priv->sta_assoc = false;
vif_priv->allow_bmps = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
@ -1132,6 +1127,13 @@ static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
goto out;
ret = wcn36xx_smd_wlan_host_suspend_ind(wcn);
}
/* Disable IRQ, we don't want to handle any packet before mac80211 is
* resumed and ready to receive packets.
*/
disable_irq(wcn->tx_irq);
disable_irq(wcn->rx_irq);
out:
mutex_unlock(&wcn->conf_mutex);
return ret;
@ -1154,6 +1156,10 @@ static int wcn36xx_resume(struct ieee80211_hw *hw)
wcn36xx_smd_ipv6_ns_offload(wcn, vif, false);
wcn36xx_smd_arp_offload(wcn, vif, false);
}
enable_irq(wcn->tx_irq);
enable_irq(wcn->rx_irq);
mutex_unlock(&wcn->conf_mutex);
return 0;
@ -1347,7 +1353,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@ -1499,6 +1504,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
mutex_init(&wcn->conf_mutex);
mutex_init(&wcn->hal_mutex);
mutex_init(&wcn->scan_lock);
__skb_queue_head_init(&wcn->amsdu);
wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
if (!wcn->hal_buf) {
@ -1576,6 +1582,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
__skb_queue_purge(&wcn->amsdu);
mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);

View file

@ -18,19 +18,19 @@
#include "wcn36xx.h"
#define WCN36XX_BMPS_FAIL_THREHOLD 3
int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
int ret = 0;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (!vif_priv->allow_bmps)
return -ENOTSUPP;
/* TODO: Make sure the TX chain clean */
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
vif_priv->pw_state = WCN36XX_BMPS;
vif_priv->bmps_fail_ct = 0;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
} else {
/*
@ -39,6 +39,11 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
* received just after auth complete
*/
wcn36xx_err("Can not enter BMPS!\n");
if (vif_priv->bmps_fail_ct++ == WCN36XX_BMPS_FAIL_THREHOLD) {
ieee80211_connection_loss(vif);
vif_priv->bmps_fail_ct = 0;
}
}
return ret;
}

View file

@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/bitops.h>
@ -266,7 +267,8 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
sta_params->max_amsdu_size = is_cap_supported(caps,
/* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */
sta_params->max_amsdu_size = !is_cap_supported(caps,
IEEE80211_HT_CAP_MAX_AMSDU);
sta_params->sgi_20Mhz = is_cap_supported(caps,
IEEE80211_HT_CAP_SGI_20);
@ -927,6 +929,86 @@ out:
return ret;
}
int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req)
{
struct wcn36xx_hal_update_channel_list_req_msg *msg_body;
int ret, i;
msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
if (!msg_body)
return -ENOMEM;
INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ);
msg_body->num_channel = min_t(u8, req->n_channels, sizeof(msg_body->channels));
for (i = 0; i < msg_body->num_channel; i++) {
struct wcn36xx_hal_channel_param *param = &msg_body->channels[i];
u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER;
u32 ant_gain = WCN36XX_HAL_DEFAULT_ANT_GAIN;
param->mhz = req->channels[i]->center_freq;
param->band_center_freq1 = req->channels[i]->center_freq;
param->band_center_freq2 = 0;
if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE;
if (req->channels[i]->flags & IEEE80211_CHAN_RADAR)
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_DFS;
if (req->channels[i]->band == NL80211_BAND_5GHZ) {
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_HT;
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_VHT;
param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11A;
} else {
param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11BG;
}
if (min_power > req->channels[i]->max_power)
min_power = req->channels[i]->max_power;
if (req->channels[i]->max_antenna_gain)
ant_gain = req->channels[i]->max_antenna_gain;
u32p_replace_bits(&param->reg_info_1, min_power,
WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK);
u32p_replace_bits(&param->reg_info_1, req->channels[i]->max_power,
WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK);
u32p_replace_bits(&param->reg_info_1, req->channels[i]->max_reg_power,
WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK);
u32p_replace_bits(&param->reg_info_1, 0,
WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK);
u32p_replace_bits(&param->reg_info_2, ant_gain,
WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"%s: freq=%u, channel_info=%08x, reg_info1=%08x, reg_info2=%08x\n",
__func__, param->mhz, param->channel_info, param->reg_info_1,
param->reg_info_2);
}
mutex_lock(&wcn->hal_mutex);
PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
if (ret) {
wcn36xx_err("Sending hal_update_channel_list failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_update_channel_list response failed err=%d\n", ret);
goto out;
}
out:
kfree(msg_body);
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@ -2394,8 +2476,11 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
if (wcn->rf_id == RF_IRIS_WCN3680)
if (wcn->rf_id == RF_IRIS_WCN3680) {
set_feat_caps(msg_body.feat_caps, DOT11AC);
set_feat_caps(msg_body.feat_caps, WLAN_CH144);
set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION);
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@ -3137,6 +3222,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_HOST_RESUME_RSP:
case WCN36XX_HAL_ENTER_IMPS_RSP:
case WCN36XX_HAL_EXIT_IMPS_RSP:
case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);

View file

@ -70,6 +70,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t cha
int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req);
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);

View file

@ -31,6 +31,13 @@ struct wcn36xx_rate {
enum rate_info_bw bw;
};
/* Buffer descriptor rx_ch field is limited to 5-bit (4+1), a mapping is used
* for 11A Channels.
*/
static const u8 ab_rx_ch_map[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104,
108, 112, 116, 120, 124, 128, 132, 136, 140,
149, 153, 157, 161, 165, 144 };
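
A worked example of the lookup this table supports (as used further down in wcn36xx_rx_skb(), values illustrative):

	hwch = (reserved0 << 4) + rx_ch = 9
	ab_rx_ch_map[9 - 1] = 100  ->  ieee80211_channel_to_frequency(100, NL80211_BAND_5GHZ) = 5500 MHz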
static const struct wcn36xx_rate wcn36xx_rate_table[] = {
/* 11b rates */
{ 10, 0, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
@ -224,6 +231,41 @@ static const struct wcn36xx_rate wcn36xx_rate_table[] = {
{ 4333, 9, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
};
static struct sk_buff *wcn36xx_unchain_msdu(struct sk_buff_head *amsdu)
{
struct sk_buff *skb, *first;
int total_len = 0;
int space;
first = __skb_dequeue(amsdu);
skb_queue_walk(amsdu, skb)
total_len += skb->len;
space = total_len - skb_tailroom(first);
if (space > 0 && pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0) {
__skb_queue_head(amsdu, first);
return NULL;
}
/* Walk list again, copying contents into msdu_head */
while ((skb = __skb_dequeue(amsdu))) {
skb_copy_from_linear_data(skb, skb_put(first, skb->len),
skb->len);
dev_kfree_skb_irq(skb);
}
return first;
}
static void __skb_queue_purge_irq(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
dev_kfree_skb_irq(skb);
}
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
@ -245,6 +287,26 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
"BD <<< ", (char *)bd,
sizeof(struct wcn36xx_rx_bd));
if (bd->pdu.mpdu_data_off <= bd->pdu.mpdu_header_off ||
bd->pdu.mpdu_len < bd->pdu.mpdu_header_len)
goto drop;
if (bd->asf && !bd->esf) { /* chained A-MSDU chunks */
/* Sanity check */
if (bd->pdu.mpdu_data_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE)
goto drop;
skb_put(skb, bd->pdu.mpdu_data_off + bd->pdu.mpdu_len);
skb_pull(skb, bd->pdu.mpdu_data_off);
/* Only set status for first chained BD (with mac header) */
goto done;
}
if (bd->pdu.mpdu_header_off < sizeof(*bd) ||
bd->pdu.mpdu_header_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE)
goto drop;
skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
skb_pull(skb, bd->pdu.mpdu_header_off);
@ -291,6 +353,22 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
ieee80211_is_probe_resp(hdr->frame_control))
status.boottime_ns = ktime_get_boottime_ns();
if (bd->scan_learn) {
/* If packet originates from hardware scanning, extract the
* band/channel from bd descriptor.
*/
u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
status.band = NL80211_BAND_5GHZ;
status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
status.band);
} else {
status.band = NL80211_BAND_2GHZ;
status.freq = ieee80211_channel_to_frequency(hwch, status.band);
}
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {
@ -305,9 +383,37 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
(char *)skb->data, skb->len);
}
done:
/* Chained AMSDU ? slow path */
if (unlikely(bd->asf && !(bd->lsf && bd->esf))) {
if (bd->esf && !skb_queue_empty(&wcn->amsdu)) {
wcn36xx_err("Discarding non complete chain");
__skb_queue_purge_irq(&wcn->amsdu);
}
__skb_queue_tail(&wcn->amsdu, skb);
if (!bd->lsf)
return 0; /* Not the last AMSDU, wait for more */
skb = wcn36xx_unchain_msdu(&wcn->amsdu);
if (!skb)
goto drop;
}
ieee80211_rx_irqsafe(wcn->hw, skb);
return 0;
drop: /* drop everything */
wcn36xx_err("Drop frame! skb:%p len:%u hoff:%u doff:%u asf=%u esf=%u lsf=%u\n",
skb, bd->pdu.mpdu_len, bd->pdu.mpdu_header_off,
bd->pdu.mpdu_data_off, bd->asf, bd->esf, bd->lsf);
dev_kfree_skb_irq(skb);
__skb_queue_purge_irq(&wcn->amsdu);
return -EINVAL;
}
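
One plausible reading of the asf/esf/lsf flags as this path uses them (an illustrative walkthrough, not taken from driver documentation): a three-buffer chained A-MSDU arrives as

	chunk 1: asf=1 esf=1 lsf=0   (carries the 802.11 header, parsed normally, queued)
	chunk 2: asf=1 esf=0 lsf=0   (data-only continuation, queued)
	chunk 3: asf=1 esf=0 lsf=1   (last chunk: wcn36xx_unchain_msdu() re-joins the queue
	                              and the merged frame goes to ieee80211_rx_irqsafe())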
static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
@ -321,8 +427,6 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
bd->pdu.mpdu_header_off;
bd->pdu.mpdu_len = len;
bd->pdu.tid = tid;
/* Use seq number generated by mac80211 */
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
@ -419,6 +523,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
tid = ieee80211_get_tid(hdr);
/* TID->QID is one-to-one mapping */
bd->queue_id = tid;
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
} else {
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS;
}
if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT ||
@ -429,6 +536,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
/* Don't use a regular queue for null packet (no ampdu) */
bd->queue_id = WCN36XX_TX_U_WQ_ID;
bd->bd_rate = WCN36XX_BD_RATE_CTRL;
if (ieee80211_is_qos_nullfunc(hdr->frame_control))
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
if (bcast) {
@ -488,6 +598,8 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
bd->queue_id = WCN36XX_TX_U_WQ_ID;
*vif_priv = __vif_priv;
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS;
wcn36xx_set_tx_pdu(bd,
ieee80211_is_data_qos(hdr->frame_control) ?
sizeof(struct ieee80211_qos_hdr) :
@ -502,10 +614,11 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned long flags;
bool is_low = ieee80211_is_data(hdr->frame_control);
bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
bool ack_ind = (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK);
struct wcn36xx_tx_bd bd;
int ret;
@ -521,30 +634,16 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.dpu_rf = WCN36XX_BMU_WQ_TX;
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
if (unlikely(ack_ind)) {
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
spin_lock_irqsave(&wcn->dxe_lock, flags);
if (wcn->tx_ack_skb) {
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
wcn36xx_warn("tx_ack_skb already set\n");
return -EINVAL;
}
wcn->tx_ack_skb = skb;
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
/* Only one at a time is supported by fw. Stop the TX queues
* until the ack status gets back.
*/
ieee80211_stop_queues(wcn->hw);
/* TX watchdog if no TX irq or ack indication received */
mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
/* Request ack indication from the firmware */
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
bd.tx_comp = 1;
bd.tx_comp = 1;
}
/* Data frames served first*/
@ -558,14 +657,8 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
if (ret && (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/* If the skb has not been transmitted,
* don't keep a reference to it.
*/
spin_lock_irqsave(&wcn->dxe_lock, flags);
wcn->tx_ack_skb = NULL;
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
if (unlikely(ret && ack_ind)) {
/* If the skb has not been transmitted, resume TX queue */
ieee80211_wake_queues(wcn->hw);
}

View file

@ -110,7 +110,8 @@ struct wcn36xx_rx_bd {
/* 0x44 */
u32 exp_seq_num:12;
u32 cur_seq_num:12;
u32 fr_type_subtype:8;
u32 rf_band:2;
u32 fr_type_subtype:6;
/* 0x48 */
u32 msdu_size:16;

View file

@ -128,7 +128,6 @@ struct wcn36xx_vif {
enum wcn36xx_hal_bss_type bss_type;
/* Power management */
bool allow_bmps;
enum wcn36xx_power_state pw_state;
u8 bss_index;
@ -151,6 +150,8 @@ struct wcn36xx_vif {
} rekey_data;
struct list_head sta_list;
int bmps_fail_ct;
};
/**
@ -269,6 +270,9 @@ struct wcn36xx {
struct sk_buff *tx_ack_skb;
struct timer_list tx_ack_timer;
/* For A-MSDU re-aggregation */
struct sk_buff_head amsdu;
/* RF module */
unsigned rf_id;
@ -276,7 +280,6 @@ struct wcn36xx {
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
#endif /* CONFIG_WCN36XX_DEBUGFS */
};
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,