wireless-drivers-next patches for 4.12
Few remaining patches for 4.12 submitted during the last week.

Major changes:

iwlwifi

* the firmware for 7265D and 3168 NICs is frozen at version 29
* more support for the upcoming A000 series

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJZAbs6AAoJEG4XJFUm622boyEH/jtmD+lWzixDX5bkwX8rE9jr
elwoHyODl83QWCOvqWuNkCf4XRJR7C38kWJD2+q6mJPVyC5vXvjM2CkIXDxYrPhc
cOPSIdnZBMo2yQ5h7YCy5rBptSvx8p85D9vRaktbLVu9qkyKrZ19GlS2uDiOqNBT
lLM3ZyOKw1c56L3HAn0fK/t+l9QlROOXCzZEgdjVt9YlStlDEZ4QiGtOP5BCnZGX
gDsXcdsXyx2cpL511zmUmMVtZ+CYXZ6MJPozXdi0bIVyLwxfUoS0Lw593rXUlKav
WAknenRKVoC5eFFJjutkVS88hL/gbWdwq15e+wQBOWEpOIdrIG4TBsawZzSH7pg=
=eNdt
-----END PGP SIGNATURE-----

Merge tag 'wireless-drivers-next-for-davem-2017-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.12

Few remaining patches for 4.12 submitted during the last week.

Major changes:

iwlwifi

* the firmware for 7265D and 3168 NICs is frozen at version 29
* more support for the upcoming A000 series
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit e221c1f0fe
@@ -197,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 	int ret;
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = ifp->drvr;
-	struct ethhdr *eh = (struct ethhdr *)(skb->data);
+	struct ethhdr *eh;
 
 	brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
 
@@ -210,22 +210,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 		goto done;
 	}
 
-	/* Make sure there's enough room for any header */
-	if (skb_headroom(skb) < drvr->hdrlen) {
-		struct sk_buff *skb2;
-
-		brcmf_dbg(INFO, "%s: insufficient headroom\n",
+	/* Make sure there's enough writable headroom*/
+	ret = skb_cow_head(skb, drvr->hdrlen);
+	if (ret < 0) {
+		brcmf_err("%s: skb_cow_head failed\n",
 			  brcmf_ifname(ifp));
-		drvr->bus_if->tx_realloc++;
-		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
 		dev_kfree_skb(skb);
-		skb = skb2;
-		if (skb == NULL) {
-			brcmf_err("%s: skb_realloc_headroom failed\n",
-				  brcmf_ifname(ifp));
-			ret = -ENOMEM;
-			goto done;
-		}
+		goto done;
 	}
 
 	/* validate length for ether packet */
@@ -235,6 +226,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 		goto done;
 	}
 
+	eh = (struct ethhdr *)(skb->data);
+
 	if (eh->h_proto == htons(ETH_P_PAE))
 		atomic_inc(&ifp->pend_8021x_cnt);
 
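For readers unfamiliar with the helper used above: skb_cow_head() folds the open-coded headroom check and reallocation into a single call. A minimal sketch of the resulting pattern (illustrative only, not the driver's exact code):

	/* Sketch (assumes <linux/skbuff.h>): skb_cow_head(skb, n) ensures at
	 * least n bytes of private, writable headroom, reallocating the
	 * header area only when it is too small or shared with a clone. */
	static netdev_tx_t sketch_start_xmit(struct sk_buff *skb, unsigned int hdrlen)
	{
		if (skb_cow_head(skb, hdrlen) < 0) {
			dev_kfree_skb(skb);	/* cannot make room; drop the frame */
			return NETDEV_TX_OK;	/* the skb is consumed either way */
		}
		/* headroom is now safe for skb_push()ing driver headers */
		return NETDEV_TX_OK;
	}

On the common fast path (enough private headroom already) the call is a cheap check, so the old skb_realloc_headroom() sequence, including the tx_realloc counter, becomes unnecessary.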
@@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
 		goto done;
 	}
 	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
-	iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+	iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff);
 done:
 	ieee80211_wake_queues(priv->hw);
 	mutex_unlock(&priv->mutex);
@@ -1145,7 +1145,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	}
 
 	IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
-	iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
+	iwl_trans_wait_tx_queues_empty(priv->trans, scd_queues);
 done:
 	mutex_unlock(&priv->mutex);
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX	17
 #define IWL7265_UCODE_API_MAX	17
-#define IWL7265D_UCODE_API_MAX	30
-#define IWL3168_UCODE_API_MAX	30
+#define IWL7265D_UCODE_API_MAX	29
+#define IWL3168_UCODE_API_MAX	29
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	17
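Context for the freeze noted in the commit message: iwlwifi probes firmware files by API version, counting down from UCODE_API_MAX toward UCODE_API_MIN until an image loads, so capping 7265D and 3168 at 29 pins them to the -29 firmware file. A rough sketch of that selection idea, where try_load_ucode() is a hypothetical stand-in for the real request_firmware() plumbing in iwl-drv.c:

	char fw_name[64];
	int api;

	for (api = api_max; api >= api_min; api--) {
		/* e.g. "iwlwifi-7265D-" + "29" + ".ucode" */
		snprintf(fw_name, sizeof(fw_name), "%s%d.ucode", fw_name_pre, api);
		if (try_load_ucode(fw_name) == 0)	/* hypothetical helper */
			break;		/* highest available version wins */
	}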
@@ -85,7 +85,7 @@
 
 static const struct iwl_base_params iwl_a000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
-	.num_of_queues = 31,
+	.num_of_queues = 512,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
 	.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -170,7 +170,7 @@ struct iwl_base_params {
 	    apmg_wake_up_wa:1,
 	    scd_chain_ext_wa:1;
 
-	u8 num_of_queues;	/* def: HW dependent */
+	u16 num_of_queues;	/* def: HW dependent */
 
 	u8 max_ll_items;
 	u8 led_compensation;
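The widening above follows directly from the a000 hunk before it: 512 queues no longer fit the old u8 field. A standalone check of the arithmetic (not part of the patch):

	#include <stdint.h>

	/* 512 exceeds u8's maximum of 255, hence the change to u16 */
	_Static_assert(512 > UINT8_MAX, "does not fit in u8");
	_Static_assert(512 <= UINT16_MAX, "fits comfortably in u16");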
@@ -1282,7 +1282,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 
 	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
 	if (!pieces)
-		return;
+		goto out_free_fw;
 
 	if (!ucode_raw)
 		goto try_again;
@@ -1494,7 +1494,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	 * or hangs loading.
 	 */
 	if (load_module) {
-		err = request_module("%s", op->name);
+		request_module("%s", op->name);
 #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
 		if (err)
 			IWL_ERR(drv,
@@ -1512,17 +1512,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	goto free;
 
 out_free_fw:
-	IWL_ERR(drv, "failed to allocate pci memory\n");
 	iwl_dealloc_ucode(drv);
 	release_firmware(ucode_raw);
 out_unbind:
 	complete(&drv->request_firmware_complete);
 	device_release_driver(drv->trans->dev);
 free:
-	for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
-		kfree(pieces->img[i].sec);
-	kfree(pieces->dbg_mem_tlv);
-	kfree(pieces);
+	if (pieces) {
+		for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+			kfree(pieces->img[i].sec);
+		kfree(pieces->dbg_mem_tlv);
+		kfree(pieces);
+	}
 }
 
 struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
@@ -243,6 +243,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *	scan request.
  * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
  *	ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -253,6 +254,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_SCAN_TSF_REPORT	= (__force iwl_ucode_tlv_api_t)28,
 	IWL_UCODE_TLV_API_TKIP_MIC_KEYS		= (__force iwl_ucode_tlv_api_t)29,
+	IWL_UCODE_TLV_API_STA_TYPE		= (__force iwl_ucode_tlv_api_t)30,
 
 	NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
@@ -246,6 +246,9 @@ void iwl_force_nmi(struct iwl_trans *trans)
 			       DEVICE_SET_NMI_VAL_DRV);
 		iwl_write_prph(trans, DEVICE_SET_NMI_REG,
 			       DEVICE_SET_NMI_VAL_HW);
+	} else if (trans->cfg->gen2) {
+		iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+			       DEVICE_SET_NMI_8000_VAL);
 	} else {
 		iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
 			       DEVICE_SET_NMI_8000_VAL);
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -438,25 +439,16 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 		vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
 }
 
-static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
-			    struct iwl_nvm_data *data,
-			    const __le16 *ch_section,
-			    u8 tx_chains, u8 rx_chains, bool lar_supported)
+void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+		     struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+		     u8 tx_chains, u8 rx_chains, bool lar_supported)
 {
 	int n_channels;
 	int n_used = 0;
 	struct ieee80211_supported_band *sband;
 
-	if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
-		n_channels = iwl_init_channel_map(
-				dev, cfg, data,
-				&ch_section[NVM_CHANNELS], lar_supported);
-	else
-		n_channels = iwl_init_channel_map(
-				dev, cfg, data,
-				&ch_section[NVM_CHANNELS_FAMILY_8000],
-				lar_supported);
-
+	n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+					  lar_supported);
 	sband = &data->bands[NL80211_BAND_2GHZ];
 	sband->band = NL80211_BAND_2GHZ;
 	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -482,6 +474,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
 			    n_used, n_channels);
 }
+IWL_EXPORT_SYMBOL(iwl_init_sbands);
 
 static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
 		       const __le16 *phy_sku)
@@ -559,8 +552,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
 	dest[5] = hw_addr[0];
 }
 
-static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
-					struct iwl_nvm_data *data)
+void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+				 struct iwl_nvm_data *data)
 {
 	__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
 	__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
@@ -578,6 +571,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
 
 	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 }
+IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr);
 
 static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
 					   const struct iwl_cfg *cfg,
@@ -718,7 +712,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
 		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
 		lar_enabled = true;
-		ch_section = nvm_sw;
+		ch_section = &nvm_sw[NVM_CHANNELS];
 	} else {
 		u16 lar_offset = data->nvm_version < 0xE39 ?
 				 NVM_LAR_OFFSET_FAMILY_8000_OLD :
@@ -728,7 +722,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		data->lar_enabled = !!(lar_config &
 				       NVM_LAR_ENABLED_FAMILY_8000);
 		lar_enabled = data->lar_enabled;
-		ch_section = regulatory;
+		ch_section = &regulatory[NVM_CHANNELS_FAMILY_8000];
 	}
 
 	/* If no valid mac address was found - bail out */
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -81,6 +82,19 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		   const __le16 *mac_override, const __le16 *phy_sku,
 		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
 
+/**
+ * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on
+ */
+void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+				 struct iwl_nvm_data *data);
+
+/**
+ * iwl_init_sbands - parse and set all channel profiles
+ */
+void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+		     struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+		     u8 tx_chains, u8 rx_chains, bool lar_supported);
+
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
  *
@@ -114,6 +114,7 @@
 #define DEVICE_SET_NMI_VAL_DRV BIT(7)
 #define DEVICE_SET_NMI_8000_REG 0x00a01c24
 #define DEVICE_SET_NMI_8000_VAL 0x1000000
+#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10
 
 /* Shared registers (0x0..0x3ff, via target indirect or periphery */
 #define SHR_BASE	0x00a10000
@@ -396,6 +396,8 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
  * currently supports
  */
 #define IWL_MAX_HW_QUEUES		32
+#define IWL_MAX_TVQM_QUEUES		512
+
 #define IWL_MAX_TID_COUNT	8
 #define IWL_MGMT_TID		15
 #define IWL_FRAME_LIMIT	64
@@ -689,7 +691,7 @@ struct iwl_trans_ops {
 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
 				    bool shared);
 
-	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
+	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
 	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
 				 bool freeze);
 	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
@@ -1193,15 +1195,15 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
 	trans->ops->block_txq_ptrs(trans, block);
 }
 
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
-						u32 txqs)
+static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
+						 u32 txqs)
 {
 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 		return -EIO;
 	}
 
-	return trans->ops->wait_tx_queue_empty(trans, txqs);
+	return trans->ops->wait_tx_queues_empty(trans, txqs);
 }
 
 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -86,6 +86,8 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
 	u32 status;
 	int size;
 
+	memset(&cmd, 0, sizeof(cmd));
+
 	if (fw_has_capa(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
 		size = sizeof(cmd);
@@ -98,8 +100,6 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
 		size = IWL_BINDING_CMD_SIZE_V1;
 	}
 
-	memset(&cmd, 0, sizeof(cmd));
-
 	cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
 							   phyctxt->color));
 	cmd.action = cpu_to_le32(action);
@@ -157,7 +157,8 @@ enum iwl_tsf_id {
  * @bi_reciprocal: 2^32 / bi
  * @dtim_interval: dtim transmit time in TU
  * @dtim_reciprocal: 2^32 / dtim_interval
- * @mcast_qid: queue ID for multicast traffic
+ * @mcast_qid: queue ID for multicast traffic.
+ *	NOTE: obsolete from VER2 and on
  * @beacon_template: beacon template ID
  */
 struct iwl_mac_data_ap {
@@ -169,7 +170,7 @@ struct iwl_mac_data_ap {
 	__le32 dtim_reciprocal;
 	__le32 mcast_qid;
 	__le32 beacon_template;
-} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
 
 /**
  * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -227,6 +229,9 @@ enum {
  */
 #define RATE_LEGACY_RATE_MSK 0xff
 
+/* Bit 10 - OFDM HE */
+#define RATE_MCS_OFDM_HE_POS	10
+#define RATE_MCS_OFDM_HE_MSK	BIT(RATE_MCS_OFDM_HE_POS)
+
 /*
  * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -255,18 +260,29 @@ enum {
 #define RATE_MCS_ANT_MSK		RATE_MCS_ANT_ABC_MSK
 #define RATE_MCS_ANT_NUM 3
 
-/* Bit 17-18: (0) SS, (1) SS*2 */
+/* Bit 17: (0) SS, (1) SS*2 */
 #define RATE_MCS_STBC_POS		17
-#define RATE_MCS_HT_STBC_MSK		(3 << RATE_MCS_STBC_POS)
-#define RATE_MCS_VHT_STBC_MSK		(1 << RATE_MCS_STBC_POS)
+#define RATE_MCS_STBC_MSK		BIT(RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define RATE_HE_DUAL_CARRIER_MODE	18
+#define RATE_HE_DUAL_CARRIER_MODE_MSK	BIT(RATE_HE_DUAL_CARRIER_MODE)
 
 /* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
 #define RATE_MCS_BF_POS			19
 #define RATE_MCS_BF_MSK			(1 << RATE_MCS_BF_POS)
 
-/* Bit 20: (0) ZLF is off, (1) ZLF is on */
-#define RATE_MCS_ZLF_POS		20
-#define RATE_MCS_ZLF_MSK		(1 << RATE_MCS_ZLF_POS)
+/*
+ * Bit 20-21: HE guard interval and LTF type.
+ * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
+ * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ */
+#define RATE_MCS_HE_GI_LTF_POS		20
+#define RATE_MCS_HE_GI_LTF_MSK		(3 << RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS		22
+#define RATE_MCS_HE_TYPE_MSK		(3 << RATE_MCS_HE_TYPE_POS)
 
 /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
 #define RATE_MCS_DUP_POS		24
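A note on the rate-flags rework above: to make room for HE fields, STBC shrinks from a two-bit field with separate HT and VHT masks to the single bit 17, bit 18 becomes HE dual-carrier mode, and bits 20-21 are repurposed from ZLF to the HE guard-interval/LTF selector. With a single STBC bit, HT and VHT frames decode it identically; a self-contained sketch mirroring the rx.c/rxmq.c hunks further down:

	#include <stdint.h>

	#define RATE_MCS_STBC_POS	17
	#define RATE_MCS_STBC_MSK	(1u << RATE_MCS_STBC_POS)

	/* 0 = single stream, 1 = STBC; one decode path for both HT and VHT */
	static inline unsigned int sketch_decode_stbc(uint32_t rate_n_flags)
	{
		return (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
	}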
@@ -214,20 +214,6 @@ enum iwl_sta_sleep_flag {
 	STA_SLEEP_STATE_MOREDATA = BIT(2),
 };
 
-/* STA ID and color bits definitions */
-#define STA_ID_SEED		(0x0f)
-#define STA_ID_POS		(0)
-#define STA_ID_MSK		(STA_ID_SEED << STA_ID_POS)
-
-#define STA_COLOR_SEED		(0x7)
-#define STA_COLOR_POS		(4)
-#define STA_COLOR_MSK		(STA_COLOR_SEED << STA_COLOR_POS)
-
-#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
-	(((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
-#define STA_ID_N_COLOR_GET_ID(id_n_color)    \
-	(((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
-
 #define STA_KEY_MAX_NUM (16)
 #define STA_KEY_IDX_INVALID (0xff)
 #define STA_KEY_MAX_DATA_KEY_NUM (4)
@@ -323,6 +309,24 @@ struct iwl_mvm_add_sta_cmd_v7 {
 	__le32 tfd_queue_msk;
 } __packed; /* ADD_STA_CMD_API_S_VER_7 */
 
+/**
+ * enum iwl_sta_type - FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWL_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ *	and probe responses.
+ * @IWL_STA_MULTICAST: multicast traffic,
+ * @IWL_STA_TDLS_LINK: TDLS link station
+ * @IWL_STA_AUX_ACTIVITY: auxilary station (scan, ROC and so on).
+ */
+enum iwl_sta_type {
+	IWL_STA_LINK,
+	IWL_STA_GENERAL_PURPOSE,
+	IWL_STA_MULTICAST,
+	IWL_STA_TDLS_LINK,
+	IWL_STA_AUX_ACTIVITY,
+};
+
 /**
  * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
@@ -347,6 +351,7 @@ struct iwl_mvm_add_sta_cmd_v7 {
  * @sleep_tx_count: number of packets to transmit to station even though it is
  *	asleep. Used to synchronise PS-poll and u-APSD responses while ucode
  *	keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
  * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
  * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
  *	mac-addr.
@@ -381,14 +386,15 @@ struct iwl_mvm_add_sta_cmd {
 	u8 remove_immediate_ba_tid;
 	__le16 add_immediate_ba_ssn;
 	__le16 sleep_tx_count;
-	__le16 sleep_state_flags;
+	u8 sleep_state_flags;
+	u8 station_type;
 	__le16 assoc_id;
 	__le16 beamform_flags;
 	__le32 tfd_queue_msk;
 	__le16 rx_ba_window;
 	u8 sp_length;
 	u8 uapsd_acs;
-} __packed; /* ADD_STA_CMD_API_S_VER_9 */
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
 
 /**
  * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
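Note that the ADD_STA layout change is size-neutral: the 16-bit sleep_state_flags splits into an 8-bit flags field plus the new station_type byte, bumping the API to VER_10. Firmware that does not advertise the STA_TYPE TLV still receives the shorter v7 command; the selection logic, which appears as a mvm/sta.c hunk later in this diff, boils down to:

	/* Sketch of the command-size selection added in mvm/sta.c below. */
	static inline int sketch_add_sta_cmd_size(struct iwl_mvm *mvm)
	{
		if (iwl_mvm_has_new_rx_api(mvm) ||
		    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
			return sizeof(struct iwl_mvm_add_sta_cmd);
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
	}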
@@ -698,6 +698,82 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	return 0;
 }
 
+static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+	struct iwl_notification_wait init_wait;
+	struct iwl_nvm_access_complete_cmd nvm_complete = {};
+	struct iwl_init_extended_cfg_cmd init_cfg = {
+		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+	};
+	static const u16 init_complete[] = {
+		INIT_COMPLETE_NOTIF,
+	};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_init_notification_wait(&mvm->notif_wait,
+				   &init_wait,
+				   init_complete,
+				   ARRAY_SIZE(init_complete),
+				   iwl_wait_init_complete,
+				   NULL);
+
+	/* Will also start the device */
+	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+		goto error;
+	}
+
+	/* Send init config command to mark that we are sending NVM access
+	 * commands
+	 */
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+						INIT_EXTENDED_CFG_CMD), 0,
+				   sizeof(init_cfg), &init_cfg);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run init config command: %d\n",
+			ret);
+		goto error;
+	}
+
+	/* Read the NVM only at driver load time, no need to do this twice */
+	if (read_nvm) {
+		/* Read nvm */
+		ret = iwl_nvm_init(mvm, true);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+			goto error;
+		}
+	}
+
+	/* In case we read the NVM from external file, load it to the NIC */
+	if (mvm->nvm_file_name)
+		iwl_mvm_load_nvm_to_nic(mvm);
+
+	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+	if (WARN_ON(ret))
+		goto error;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+						NVM_ACCESS_COMPLETE), 0,
+				   sizeof(nvm_complete), &nvm_complete);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+			ret);
+		goto error;
+	}
+
+	/* We wait for the INIT complete notification */
+	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+				     MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+	iwl_remove_notification(&mvm->notif_wait, &init_wait);
+	return ret;
+}
+
 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 {
 	struct iwl_phy_cfg_cmd phy_cfg_cmd;
@@ -726,6 +802,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 	};
 	int ret;
 
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return iwl_run_unified_mvm_ucode(mvm, true);
+
 	lockdep_assert_held(&mvm->mutex);
 
 	if (WARN_ON_ONCE(mvm->calibrating))
@@ -832,82 +911,6 @@ out:
 	return ret;
 }
 
-int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
-{
-	struct iwl_notification_wait init_wait;
-	struct iwl_nvm_access_complete_cmd nvm_complete = {};
-	struct iwl_init_extended_cfg_cmd init_cfg = {
-		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
-	};
-	static const u16 init_complete[] = {
-		INIT_COMPLETE_NOTIF,
-	};
-	int ret;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	iwl_init_notification_wait(&mvm->notif_wait,
-				   &init_wait,
-				   init_complete,
-				   ARRAY_SIZE(init_complete),
-				   iwl_wait_init_complete,
-				   NULL);
-
-	/* Will also start the device */
-	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
-	if (ret) {
-		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
-		goto error;
-	}
-
-	/* Send init config command to mark that we are sending NVM access
-	 * commands
-	 */
-	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
-						INIT_EXTENDED_CFG_CMD), 0,
-				   sizeof(init_cfg), &init_cfg);
-	if (ret) {
-		IWL_ERR(mvm, "Failed to run init config command: %d\n",
-			ret);
-		goto error;
-	}
-
-	/* Read the NVM only at driver load time, no need to do this twice */
-	if (read_nvm) {
-		/* Read nvm */
-		ret = iwl_nvm_init(mvm, true);
-		if (ret) {
-			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
-			goto error;
-		}
-	}
-
-	/* In case we read the NVM from external file, load it to the NIC */
-	if (mvm->nvm_file_name)
-		iwl_mvm_load_nvm_to_nic(mvm);
-
-	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
-	if (WARN_ON(ret))
-		goto error;
-
-	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
-						NVM_ACCESS_COMPLETE), 0,
-				   sizeof(nvm_complete), &nvm_complete);
-	if (ret) {
-		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
-			ret);
-		goto error;
-	}
-
-	/* We wait for the INIT complete notification */
-	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
-				     MVM_UCODE_ALIVE_TIMEOUT);
-
-error:
-	iwl_remove_notification(&mvm->notif_wait, &init_wait);
-	return ret;
-}
-
 static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
 					  struct iwl_rx_packet *pkt)
 {
@@ -1198,6 +1201,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 	enabled = !!(wifi_pkg->package.elements[1].integer.value);
 	n_profiles = wifi_pkg->package.elements[2].integer.value;
 
+	/* in case of BIOS bug */
+	if (n_profiles <= 0) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
 	for (i = 0; i < n_profiles; i++) {
 		/* the tables start at element 3 */
 		static int pos = 3;
@@ -467,6 +467,11 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 			queue = IWL_MVM_DQA_GCAST_QUEUE;
 		}
 
+		/*
+		 * For TVQM this will be overwritten later with the FW assigned
+		 * queue value (when queue is enabled).
+		 */
+		mvmvif->cab_queue = queue;
 		vif->cab_queue = queue;
 	} else {
 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
@@ -902,7 +907,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
 
 	/* Allocate sniffer station */
 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
-				       vif->type);
+				       vif->type, IWL_STA_GENERAL_PURPOSE);
 	if (ret)
 		return ret;
 
@@ -1223,7 +1228,9 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
 		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
 					       vif->bss_conf.dtim_period));
 
-	ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_STA_TYPE))
+		ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
 
 	/*
 	 * Only set the beacon time when the MAC is being added, when we
@@ -1358,7 +1358,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 		 * which shouldn't be in TFD mask anyway
 		 */
 		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
-					       0, vif->type);
+					       0, vif->type,
+					       IWL_STA_MULTICAST);
 		if (ret)
 			goto out_release;
 	}
@@ -1477,7 +1478,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
 		 * already marked as draining, so to complete the draining, we
 		 * just need to wait until the transport is empty.
 		 */
-		iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
+		iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
 	}
 
 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
@@ -2111,15 +2112,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_remove;
 
+	ret = iwl_mvm_add_mcast_sta(mvm, vif);
+	if (ret)
+		goto out_unbind;
+
 	/* Send the bcast station. At this stage the TBTT and DTIM time events
 	 * are added and applied to the scheduler */
 	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
 	if (ret)
-		goto out_unbind;
-
-	ret = iwl_mvm_add_mcast_sta(mvm, vif);
-	if (ret)
-		goto out_rm_bcast;
+		goto out_rm_mcast;
 
 	/* must be set before quota calculations */
 	mvmvif->ap_ibss_active = true;
@@ -2148,9 +2149,9 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 out_quota_failed:
 	iwl_mvm_power_update_mac(mvm);
 	mvmvif->ap_ibss_active = false;
-	iwl_mvm_rm_mcast_sta(mvm, vif);
-out_rm_bcast:
 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
+out_rm_mcast:
+	iwl_mvm_rm_mcast_sta(mvm, vif);
 out_unbind:
 	iwl_mvm_binding_remove_vif(mvm, vif);
 out_remove:
@@ -2196,8 +2197,20 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
 
 	iwl_mvm_update_quotas(mvm, false, NULL);
-	iwl_mvm_rm_mcast_sta(mvm, vif);
+
+	/*
+	 * This is not very nice, but the simplest:
+	 * For older FWs removing the mcast sta before the bcast station may
+	 * cause assert 0x2b00.
+	 * This is fixed in later FW (which will stop beaconing when removing
+	 * bcast station).
+	 * So make the order of removal depend on the TLV
+	 */
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		iwl_mvm_rm_mcast_sta(mvm, vif);
 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		iwl_mvm_rm_mcast_sta(mvm, vif);
 	iwl_mvm_binding_remove_vif(mvm, vif);
 
 	iwl_mvm_power_update_mac(mvm);
@@ -2363,7 +2376,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 			continue;
 
-		if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE)
+		if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;
 
 		__set_bit(tid_data->txq_id, &txqs);
@@ -3988,7 +4001,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 		/* this can take a while, and we may need/want other operations
 		 * to succeed while doing this, so do it without the mutex held
 		 */
-		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
+		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
 	}
 }
 
@@ -380,6 +380,8 @@ struct iwl_mvm_vif {
 	bool associated;
 	u8 ap_assoc_sta_count;
 
+	u16 cab_queue;
+
 	bool uploaded;
 	bool ap_ibss_active;
 	bool pm_enabled;
@@ -715,6 +717,8 @@ enum iwl_mvm_queue_status {
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
+#define IWL_MVM_INVALID_QUEUE	0xFFFF
+
 #define IWL_MVM_NUM_CIPHERS             10
 
 #ifdef CONFIG_ACPI
@@ -784,9 +788,9 @@ struct iwl_mvm {
 		u64 on_time_scan;
 	} radio_stats, accu_radio_stats;
 
+	u8 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
 	struct {
-		/* Map to HW queue */
-		u32 hw_queue_to_mac80211;
 		u8 hw_queue_refcount;
 		u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
 		bool reserved; /* Is this the TXQ reserved for a STA */
@@ -1312,7 +1316,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
 ******************/
 /* uCode */
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
-int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
 
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@@ -1828,6 +1831,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 				     u32 size);
 void iwl_mvm_reorder_timer_expired(unsigned long data);
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
 
 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
 
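Background on the map move above: queue_info[] is sized for the legacy 32-queue scheduler, while TVQM on the a000 TX path exposes up to 512 hardware queues, so the HW-queue-to-mac80211 mapping becomes a standalone array indexed by hardware queue number. Callers switch from the struct field to a plain array lookup, as the ops.c and sta.c hunks below show; a sketch of the access pattern:

	/* Sketch: valid for hw_queue in [0, IWL_MAX_TVQM_QUEUES), not just
	 * the 32 legacy queues that queue_info[] covers. */
	static unsigned long sketch_hw_to_mac_queues(struct iwl_mvm *mvm, int hw_queue)
	{
		return mvm->hw_queue_to_mac80211[hw_queue];
	}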
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -817,6 +817,11 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+		IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+		return;
+	}
+
 	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
 		return;
 
@@ -746,10 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	mutex_lock(&mvm->mutex);
 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
-	if (iwl_mvm_has_new_tx_api(mvm))
-		err = iwl_run_unified_mvm_ucode(mvm, true);
-	else
-		err = iwl_run_init_mvm_ucode(mvm, true);
+	err = iwl_run_init_mvm_ucode(mvm, true);
 	if (!err || !iwlmvm_mod_params.init_dbg)
 		iwl_mvm_stop_device(mvm);
 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -1047,7 +1044,7 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 	unsigned long mq;
 
 	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+	mq = mvm->hw_queue_to_mac80211[hw_queue];
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	iwl_mvm_stop_mac_queues(mvm, mq);
@@ -1077,7 +1074,7 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 	unsigned long mq;
 
 	spin_lock_bh(&mvm->queue_info_lock);
-	mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+	mq = mvm->hw_queue_to_mac80211[hw_queue];
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	iwl_mvm_start_mac_queues(mvm, mq);
@@ -826,7 +826,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
 	if (is_siso(rate) && rate->stbc) {
 		/* To enable STBC we need to set both a flag and ANT_AB */
 		ucode_rate |= RATE_MCS_ANT_AB_MSK;
-		ucode_rate |= RATE_MCS_VHT_STBC_MSK;
+		ucode_rate |= RATE_MCS_STBC_MSK;
 	}
 
 	ucode_rate |= rate->bw;
@@ -873,7 +873,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
 		rate->sgi = true;
 	if (ucode_rate & RATE_MCS_LDPC_MSK)
 		rate->ldpc = true;
-	if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
+	if (ucode_rate & RATE_MCS_STBC_MSK)
 		rate->stbc = true;
 	if (ucode_rate & RATE_MCS_BF_MSK)
 		rate->bfer = true;
@@ -3641,13 +3641,12 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
 		bw = "BAD BW";
 	}
 
-	return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+	return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
 		       type, rs_pretty_ant(ant), bw, mcs, nss,
 		       (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
-		       (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
+		       (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
 		       (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
-		       (rate & RATE_MCS_BF_MSK) ? "BF " : "",
-		       (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+		       (rate & RATE_MCS_BF_MSK) ? "BF " : "");
 }
 
 /**
@@ -443,13 +443,13 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 	if (rate_n_flags & RATE_MCS_LDPC_MSK)
 		rx_status->flag |= RX_FLAG_LDPC;
 	if (rate_n_flags & RATE_MCS_HT_MSK) {
-		u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
 				RATE_MCS_STBC_POS;
 		rx_status->flag |= RX_FLAG_HT;
 		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
 		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
 	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
-		u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
 				RATE_MCS_STBC_POS;
 		rx_status->vht_nss =
 			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
@@ -974,13 +974,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	if (rate_n_flags & RATE_MCS_LDPC_MSK)
 		rx_status->flag |= RX_FLAG_LDPC;
 	if (rate_n_flags & RATE_MCS_HT_MSK) {
-		u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
 				RATE_MCS_STBC_POS;
 		rx_status->flag |= RX_FLAG_HT;
 		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
 		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
 	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
-		u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
 				RATE_MCS_STBC_POS;
 		rx_status->vht_nss =
 			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
@@ -81,44 +81,30 @@ enum iwl_mvm_traffic_load {
 	IWL_MVM_TRAFFIC_HIGH,
 };
 
+#define IWL_SCAN_DWELL_ACTIVE		10
+#define IWL_SCAN_DWELL_PASSIVE		110
+#define IWL_SCAN_DWELL_FRAGMENTED	44
+#define IWL_SCAN_DWELL_EXTENDED		90
+
 struct iwl_mvm_scan_timing_params {
-	u32 dwell_active;
-	u32 dwell_passive;
-	u32 dwell_fragmented;
-	u32 dwell_extended;
 	u32 suspend_time;
 	u32 max_out_time;
 };
 
 static struct iwl_mvm_scan_timing_params scan_timing[] = {
 	[IWL_SCAN_TYPE_UNASSOC] = {
-		.dwell_active = 10,
-		.dwell_passive = 110,
-		.dwell_fragmented = 44,
-		.dwell_extended = 90,
 		.suspend_time = 0,
 		.max_out_time = 0,
 	},
 	[IWL_SCAN_TYPE_WILD] = {
-		.dwell_active = 10,
-		.dwell_passive = 110,
-		.dwell_fragmented = 44,
-		.dwell_extended = 90,
 		.suspend_time = 30,
 		.max_out_time = 120,
 	},
 	[IWL_SCAN_TYPE_MILD] = {
-		.dwell_active = 10,
-		.dwell_passive = 110,
-		.dwell_fragmented = 44,
-		.dwell_extended = 90,
 		.suspend_time = 120,
 		.max_out_time = 120,
 	},
 	[IWL_SCAN_TYPE_FRAGMENTED] = {
-		.dwell_active = 10,
-		.dwell_passive = 110,
-		.dwell_fragmented = 44,
 		.suspend_time = 95,
 		.max_out_time = 44,
 	},
@@ -294,34 +280,15 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
 	return max_ie_len;
 }
 
-static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
-				     int num_res, u8 *buf, size_t buf_size)
-{
-	int i;
-	u8 *pos = buf, *end = buf + buf_size;
-
-	for (i = 0; pos < end && i < num_res; i++)
-		pos += snprintf(pos, end - pos, " %u", res[i].channel);
-
-	/* terminate the string in case the buffer was too short */
-	*(buf + buf_size - 1) = '\0';
-
-	return buf;
-}
-
 void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
-	u8 buf[256];
 
 	IWL_DEBUG_SCAN(mvm,
-		       "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
-		       notif->status, notif->scanned_channels,
-		       iwl_mvm_dump_channel_list(notif->results,
-						 notif->scanned_channels, buf,
-						 sizeof(buf)));
+		       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+		       notif->status, notif->scanned_channels);
 
 	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
 		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
@@ -743,10 +710,10 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_lmac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
-	cmd->active_dwell = scan_timing[params->type].dwell_active;
-	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
-	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
-	cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+	cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+	cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+	cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+	cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
 	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
 	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -944,13 +911,12 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
 }
 
 static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
-				    struct iwl_scan_dwell *dwell,
-				    struct iwl_mvm_scan_timing_params *timing)
+				    struct iwl_scan_dwell *dwell)
 {
-	dwell->active = timing->dwell_active;
-	dwell->passive = timing->dwell_passive;
-	dwell->fragmented = timing->dwell_fragmented;
-	dwell->extended = timing->dwell_extended;
+	dwell->active = IWL_SCAN_DWELL_ACTIVE;
+	dwell->passive = IWL_SCAN_DWELL_PASSIVE;
+	dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
+	dwell->extended = IWL_SCAN_DWELL_EXTENDED;
 }
 
 static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
@@ -979,7 +945,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
 	cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
 	cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
 
-	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
 
 	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
 
@@ -1010,7 +976,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
 			cpu_to_le32(scan_timing[type].max_out_time);
 	}
 
-	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
 
 	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
 
@@ -1114,11 +1080,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 		cmd->passive_dwell = params->measurement_dwell;
 		cmd->extended_dwell = params->measurement_dwell;
 	} else {
-		cmd->active_dwell = timing->dwell_active;
-		cmd->passive_dwell = timing->dwell_passive;
-		cmd->extended_dwell = timing->dwell_extended;
+		cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+		cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+		cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
 	}
-	cmd->fragmented_dwell = timing->dwell_fragmented;
+	cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
 
 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -1612,16 +1578,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
-	u8 buf[256];
 
 	mvm->scan_start = le64_to_cpu(notif->start_tsf);
 
 	IWL_DEBUG_SCAN(mvm,
-		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
-		       notif->status, notif->scanned_channels,
-		       iwl_mvm_dump_channel_list(notif->results,
-						 notif->scanned_channels, buf,
-						 sizeof(buf)));
+		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+		       notif->status, notif->scanned_channels);
 
 	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
 		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
@@ -77,9 +77,11 @@
  */
 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
 {
-	return iwl_mvm_has_new_rx_api(mvm) ?
-		sizeof(struct iwl_mvm_add_sta_cmd) :
-		sizeof(struct iwl_mvm_add_sta_cmd_v7);
+	if (iwl_mvm_has_new_rx_api(mvm) ||
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		return sizeof(struct iwl_mvm_add_sta_cmd);
+	else
+		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
 }
 
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
@@ -126,6 +128,9 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;
 
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		add_sta_cmd.station_type = mvm_sta->sta_type;
+
 	if (!update || (flags & STA_MODIFY_QUEUES)) {
 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
 
@@ -464,7 +469,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 			disable_agg_tids |= BIT(tid);
-		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 	}
 
 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -495,6 +500,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+	if (WARN_ON(!mvmsta))
+		return -EINVAL;
 
 	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
 	/* Disable the queue */
@@ -642,7 +649,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
 	cmd.tid = mvm->queue_info[queue].txq_tid;
-	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	mq = mvm->hw_queue_to_mac80211[queue];
 	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
 	spin_unlock_bh(&mvm->queue_info_lock);
 
@@ -651,7 +658,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 
 	/* Stop MAC queues and wait for this queue to empty */
 	iwl_mvm_stop_mac_queues(mvm, mq);
-	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
+	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
 	if (ret) {
 		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
 			queue);
@@ -730,10 +737,6 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
 	mvmsta->tfd_queue_msk |= BIT(queue);
 	spin_unlock_bh(&mvmsta->lock);
 
-	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
-	spin_unlock_bh(&mvm->queue_info_lock);
-
 	return 0;
 }
 
@@ -1084,7 +1087,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
 		ac = iwl_mvm_tid_to_ac_queue(tid);
 		mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
 
-		if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+		if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
 		    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
 			IWL_ERR(mvm,
 				"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
@@ -1129,8 +1132,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 
 	mutex_lock(&mvm->mutex);
 
+	/* No queue reconfiguration in TVQM mode */
+	if (iwl_mvm_has_new_tx_api(mvm))
+		goto alloc_queues;
+
 	/* Reconfigure queues requiring reconfiguation */
-	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
 		bool reconfig;
 		bool change_owner;
 
@@ -1158,6 +1165,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 		iwl_mvm_change_queue_owner(mvm, queue);
 	}
 
+alloc_queues:
 	/* Go over all stations with deferred traffic */
 	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
 			 IWL_MVM_STATION_COUNT) {
@@ -1186,6 +1194,10 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 	int queue;
 	bool using_inactive_queue = false, same_sta = false;
 
+	/* queue reserving is disabled on new TX path */
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return 0;
+
 	/*
 	 * Check for inactive queues, so we don't reach a situation where we
 	 * can't add a STA due to a shortage in queues that doesn't really exist
@@ -1261,7 +1273,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 		int ac;
 		u8 mac_queue;
 
-		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+		if (txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;
 
 		skb_queue_head_init(&tid_data->deferred_tx_frames);
@@ -1292,9 +1304,8 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 
 			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
 					   wdg_timeout);
+			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
 		}
-
-		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
 	}
 
 	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
@@ -1336,6 +1347,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 	mvm_sta->tx_protection = 0;
 	mvm_sta->tt_tx_protection = false;
+	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
 
 	/* HW restart, don't assume the memory has been zeroed */
 	atomic_set(&mvm->pending_frames[sta_id], 0);
@@ -1369,7 +1381,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		 * Mark all queues for this STA as unallocated and defer TX
 		 * frames until the queue is allocated
 		 */
-		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
 		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
 	mvm_sta->deferred_traffic_tid_map = 0;
@@ -1385,7 +1397,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		mvm_sta->dup_data = dup_data;
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm)) {
+	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
 						 ieee80211_vif_type_p2p(vif));
 		if (ret)
@@ -1568,13 +1580,13 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvm->mutex);
 
 	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
-		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;
 
 		ac = iwl_mvm_tid_to_ac_queue(i);
 		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
 				    vif->hw_queue[ac], i, 0);
-		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
 	}
 }
 
@@ -1602,8 +1614,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
 		if (ret)
 			return ret;
-		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
-						    mvm_sta->tfd_queue_msk);
+		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+						     mvm_sta->tfd_queue_msk);
		if (ret)
 			return ret;
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
@@ -1719,7 +1731,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
 
 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
 			     struct iwl_mvm_int_sta *sta,
-			     u32 qmask, enum nl80211_iftype iftype)
+			     u32 qmask, enum nl80211_iftype iftype,
+			     enum iwl_sta_type type)
 {
 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
@@ -1728,6 +1741,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
 	}
 
 	sta->tfd_queue_msk = qmask;
+	sta->type = type;
 
 	/* put a non-NULL value so iterating over the stations won't stop */
 	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
@@ -1756,6 +1770,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
 	cmd.sta_id = sta->sta_id;
 	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
 							     color));
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		cmd.station_type = sta->type;
 
 	if (!iwl_mvm_has_new_tx_api(mvm))
 		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
@@ -1820,7 +1836,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 
 	/* Allocate aux station and assign to it the aux queue */
 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
-				       NL80211_IFTYPE_UNSPECIFIED);
+				       NL80211_IFTYPE_UNSPECIFIED,
+				       IWL_STA_AUX_ACTIVITY);
 	if (ret)
 		return ret;
 
@@ -1893,7 +1910,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
 	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 	const u8 *baddr = _baddr;
-	int queue = 0;
+	int queue;
 	int ret;
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
@@ -1938,10 +1955,11 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
-						    bsta->sta_id,
-						    IWL_MAX_TID_COUNT,
-						    wdg_timeout);
+		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+						bsta->sta_id,
+						IWL_MAX_TID_COUNT,
+						wdg_timeout);
+
 		if (vif->type == NL80211_IFTYPE_AP)
 			mvm->probe_queue = queue;
 		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
@@ -2018,7 +2036,8 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	}
 
 	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
-					ieee80211_vif_type_p2p(vif));
+					ieee80211_vif_type_p2p(vif),
+					IWL_STA_GENERAL_PURPOSE);
 }
 
 /* Allocate a new station entry for the broadcast station to the given vif,
@@ -2104,6 +2123,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
 		return -ENOTSUPP;
 
+	/*
+	 * While in previous FWs we had to exclude cab queue from TFD queue
+	 * mask, now it is needed as any other queue.
+	 */
+	if (!iwl_mvm_has_new_tx_api(mvm) &&
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);
+		msta->tfd_queue_msk |= BIT(vif->cab_queue);
+	}
 	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
 					 mvmvif->id, mvmvif->color);
 	if (ret) {
@@ -2114,15 +2143,18 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	/*
 	 * Enable cab queue after the ADD_STA command is sent.
 	 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
-	 * command with unknown station id.
+	 * command with unknown station id, and for FW that doesn't support
+	 * station API since the cab queue is not included in the
+	 * tfd_queue_mask.
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
 						    msta->sta_id,
 						    IWL_MAX_TID_COUNT,
 						    timeout);
-		vif->cab_queue = queue;
-	} else {
+		mvmvif->cab_queue = queue;
+	} else if (!fw_has_api(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_API_STA_TYPE)) {
 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
 				   &cfg, timeout);
 	}
@@ -2144,7 +2176,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (!iwl_mvm_is_dqa_supported(mvm))
 		return 0;
 
-	iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
 			    IWL_MAX_TID_COUNT, 0);
 
 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
@@ -2485,10 +2517,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 *	one and mark it as reserved
 	 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
 	 *	non-DQA mode, since the TXQ hasn't yet been allocated
+	 * Don't support case 3 for new TX path as it is not expected to happen
+	 * and aggregation will be offloaded soon anyway
 	 */
 	txq_id = mvmsta->tid_data[tid].txq_id;
-	if (iwl_mvm_is_dqa_supported(mvm) &&
-	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		if (txq_id == IWL_MVM_INVALID_QUEUE) {
+			ret = -ENXIO;
+			goto release_locks;
+		}
+	} else if (iwl_mvm_is_dqa_supported(mvm) &&
+		   unlikely(mvm->queue_info[txq_id].status ==
+			    IWL_MVM_QUEUE_SHARED)) {
 		ret = -ENXIO;
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can't start tid %d agg on shared queue!\n",
@@ -2584,6 +2624,20 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	tid_data->amsdu_in_ampdu_allowed = amsdu;
 	spin_unlock_bh(&mvmsta->lock);
 
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		/*
+		 * If no queue iwl_mvm_sta_tx_agg_start() would have failed so
+		 * no need to check queue's status
+		 */
+		if (buf_size < mvmsta->max_agg_bufsize)
+			return -ENOTSUPP;
+
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+		if (ret)
+			return -EIO;
+		goto out;
+	}
+
 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
 	spin_lock_bh(&mvm->queue_info_lock);
@@ -2601,19 +2655,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@ -2601,19 +2655,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
* changed from current (become smaller)
|
||||
*/
|
||||
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
|
||||
/*
|
||||
* On new TX API rs and BA manager are offloaded.
|
||||
* For now though, just don't support being reconfigured
|
||||
*/
|
||||
if (iwl_mvm_has_new_tx_api(mvm))
|
||||
return -ENOTSUPP;
|
||||
|
||||
/*
|
||||
* If reconfiguring an existing queue, it first must be
|
||||
* drained
|
||||
*/
|
||||
ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
|
||||
BIT(queue));
|
||||
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
|
||||
BIT(queue));
|
||||
if (ret) {
|
||||
IWL_ERR(mvm,
|
||||
"Error draining queue before reconfig\n");
|
||||
|
@ -2648,6 +2695,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
out:
|
||||
/*
|
||||
* Even though in theory the peer could have different
|
||||
* aggregation reorder buffer sizes for different sessions,
|
||||
|
@ -2665,6 +2713,27 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
|
||||
}
|
||||
|
||||
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sta *mvmsta,
|
||||
u16 txq_id)
|
||||
{
|
||||
if (iwl_mvm_has_new_tx_api(mvm))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
/*
|
||||
* The TXQ is marked as reserved only if no traffic came through yet
|
||||
* This means no traffic has been sent on this TID (agg'd or not), so
|
||||
* we no longer have use for the queue. Since it hasn't even been
|
||||
* allocated through iwl_mvm_enable_txq, so we can just mark it back as
|
||||
* free.
|
||||
*/
|
||||
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
|
||||
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
}
|
||||
|
||||
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, u16 tid)
|
||||
{
|
||||
|
@ -2691,18 +2760,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
mvmsta->agg_tids &= ~BIT(tid);
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
/*
|
||||
* The TXQ is marked as reserved only if no traffic came through yet
|
||||
* This means no traffic has been sent on this TID (agg'd or not), so
|
||||
* we no longer have use for the queue. Since it hasn't even been
|
||||
* allocated through iwl_mvm_enable_txq, so we can just mark it back as
|
||||
* free.
|
||||
*/
|
||||
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
|
||||
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
|
||||
|
||||
switch (tid_data->state) {
|
||||
case IWL_AGG_ON:
|
||||
|
@ -2782,24 +2840,14 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
mvmsta->agg_tids &= ~BIT(tid);
|
||||
spin_unlock_bh(&mvmsta->lock);
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
/*
|
||||
* The TXQ is marked as reserved only if no traffic came through yet
|
||||
* This means no traffic has been sent on this TID (agg'd or not), so
|
||||
* we no longer have use for the queue. Since it hasn't even been
|
||||
* allocated through iwl_mvm_enable_txq, so we can just mark it back as
|
||||
* free.
|
||||
*/
|
||||
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
|
||||
|
||||
if (old_state >= IWL_AGG_ON) {
|
||||
iwl_mvm_drain_sta(mvm, mvmsta, true);
|
||||
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
|
||||
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
|
||||
iwl_trans_wait_tx_queue_empty(mvm->trans,
|
||||
mvmsta->tfd_queue_msk);
|
||||
iwl_trans_wait_tx_queues_empty(mvm->trans,
|
||||
mvmsta->tfd_queue_msk);
|
||||
iwl_mvm_drain_sta(mvm, mvmsta, false);
|
||||
|
||||
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
|
||||
|
@ -3429,13 +3477,13 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
|
|||
|
||||
/* Note: this is ignored by firmware not supporting GO uAPSD */
|
||||
if (more_data)
|
||||
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
|
||||
cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
|
||||
|
||||
if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
|
||||
mvmsta->next_status_eosp = true;
|
||||
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
|
||||
cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
|
||||
} else {
|
||||
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
|
||||
cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
|
||||
}
|
||||
|
||||
/* block the Tx queues until the FW updated the sleep Tx count */
|
||||
|
@ -3512,6 +3560,27 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
|
|||
spin_unlock_bh(&mvm_sta->lock);
|
||||
}
|
||||
|
||||
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_vif *mvmvif,
|
||||
struct iwl_mvm_int_sta *sta,
|
||||
bool disable)
|
||||
{
|
||||
u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
|
||||
struct iwl_mvm_add_sta_cmd cmd = {
|
||||
.add_modify = STA_MODE_MODIFY,
|
||||
.sta_id = sta->sta_id,
|
||||
.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
|
||||
.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
|
||||
.mac_id_n_color = cpu_to_le32(id),
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
|
||||
iwl_mvm_add_sta_cmd_size(mvm), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
|
||||
}
|
||||
|
||||
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_vif *mvmvif,
|
||||
bool disable)
|
||||
|
@ -3536,6 +3605,22 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
|
|||
|
||||
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
|
||||
}
|
||||
|
||||
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
|
||||
return;
|
||||
|
||||
/* Need to block/unblock also multicast station */
|
||||
if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
|
||||
iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
|
||||
&mvmvif->mcast_sta, disable);
|
||||
|
||||
/*
|
||||
* Only unblock the broadcast station (FW blocks it for immediate
|
||||
* quiet, not the driver)
|
||||
*/
|
||||
if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
|
||||
iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
|
||||
&mvmvif->bcast_sta, disable);
|
||||
}
|
||||
|
||||
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
|
|
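The new `type' argument threaded through iwl_mvm_allocate_int_sta() above comes from a station-type enum this series adds to the firmware API. A minimal sketch for orientation only: it lists just the members visible in these hunks plus the conventional link-station entry; the authoritative definition lives in the iwlwifi fw-api headers (not part of this excerpt) and may order or extend the list differently:

	/* Illustrative sketch, not the real header */
	enum iwl_sta_type {
		IWL_STA_LINK,			/* ordinary peer station */
		IWL_STA_GENERAL_PURPOSE,	/* e.g. the broadcast station */
		IWL_STA_AUX_ACTIVITY,		/* e.g. the aux/scan station */
	};

When the firmware advertises IWL_UCODE_TLV_API_STA_TYPE, iwl_mvm_add_int_sta_common() copies this value into cmd.station_type, which is what allows the cab (content-after-beacon) queue handling above to differ between old and new firmware.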
@@ -380,6 +380,7 @@ struct iwl_mvm_rxq_dup_data {
 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
 *	tid.
 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @sta_type: station type
 * @bt_reduced_txpower: is reduced tx power enabled for this station
 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
 *	we need to signal the EOSP
@@ -416,6 +417,7 @@ struct iwl_mvm_sta {
 	u32 mac_id_n_color;
 	u16 tid_disable_agg;
 	u8 max_agg_bufsize;
+	enum iwl_sta_type sta_type;
 	bool bt_reduced_txpower;
 	bool next_status_eosp;
 	spinlock_t lock;
@@ -453,10 +455,12 @@ iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
 * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
 *	broadcast)
 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @type: station type
 * @tfd_queue_msk: the tfd queues used by the station
 */
 struct iwl_mvm_int_sta {
 	u32 sta_id;
+	enum iwl_sta_type type;
 	u32 tfd_queue_msk;
 };
 
@@ -536,7 +540,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
 			     struct iwl_mvm_int_sta *sta,
-			     u32 qmask, enum nl80211_iftype iftype);
+			     u32 qmask, enum nl80211_iftype iftype,
+			     enum iwl_sta_type type);
 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -630,6 +630,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			if (queue < 0)
 				return -1;
 
+			if (queue == info.control.vif->cab_queue)
+				queue = mvmvif->cab_queue;
 		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
 			   is_multicast_ether_addr(hdr->addr1)) {
 			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
@@ -918,7 +920,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
-	u8 txq_id = info->hw_queue;
+	u16 txq_id = info->hw_queue;
 	bool is_ampdu = false;
 	int hdrlen;
 
@@ -988,11 +990,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
 	/* Check if TXQ needs to be allocated or re-activated */
-	if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
+	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
 		     !mvmsta->tid_data[tid].is_tid_active) &&
 	    iwl_mvm_is_dqa_supported(mvm)) {
 		/* If TXQ needs to be allocated... */
-		if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
+		if (txq_id == IWL_MVM_INVALID_QUEUE) {
 			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
 
 			/*
@@ -1004,6 +1006,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 			return 0;
 		}
 
+		/* queue should always be active in new TX path */
+		WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
 		/* If we are here - TXQ exists and needs to be re-activated */
 		spin_lock(&mvm->queue_info_lock);
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
@@ -1014,7 +1019,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 			    txq_id);
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm)) {
+	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 		/* Keep track of the time of the last frame for this RA/TID */
 		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
 
@@ -592,15 +592,16 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
 
 	lockdep_assert_held(&mvm->queue_info_lock);
 
+	/* This should not be hit with new TX path */
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -ENOSPC;
+
 	/* Start by looking for a free queue */
 	for (i = minq; i <= maxq; i++)
 		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
 		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
 			return i;
 
-	if (iwl_mvm_has_new_tx_api(mvm))
-		return -ENOSPC;
-
 	/*
 	 * If no free queue found - settle for an inactive one to reconfigure
 	 * Make sure that the inactive queue either already belongs to this STA,
@@ -670,7 +671,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
 	if (mvm->queue_info[queue].hw_queue_refcount > 0)
 		enable_queue = false;
 
-	mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+
 	mvm->queue_info[queue].hw_queue_refcount++;
 	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
 	mvm->queue_info[queue].ra_sta_id = sta_id;
@@ -688,7 +690,7 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
 			    queue, mvm->queue_info[queue].hw_queue_refcount,
-			    mvm->queue_info[queue].hw_queue_to_mac80211);
+			    mvm->hw_queue_to_mac80211[queue]);
 
 	spin_unlock_bh(&mvm->queue_info_lock);
 
@@ -720,7 +722,10 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
 	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
 			    queue, sta_id, tid);
 
-	iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid);
+	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+			    queue, mvm->hw_queue_to_mac80211[queue]);
 
 	return queue;
 }
@@ -764,6 +769,17 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 		.action = SCD_CFG_DISABLE_QUEUE,
 	};
 	bool remove_mac_queue = true;
+	int ret;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		spin_lock_bh(&mvm->queue_info_lock);
+		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		iwl_trans_txq_free(mvm->trans, queue);
+
+		return 0;
+	}
 
 	spin_lock_bh(&mvm->queue_info_lock);
 
@@ -791,7 +807,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	}
 
 	if (remove_mac_queue)
-		mvm->queue_info[queue].hw_queue_to_mac80211 &=
+		mvm->hw_queue_to_mac80211[queue] &=
 			~BIT(mac80211_queue);
 	mvm->queue_info[queue].hw_queue_refcount--;
 
@@ -804,7 +820,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 		"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
 		queue,
 		mvm->queue_info[queue].hw_queue_refcount,
-		mvm->queue_info[queue].hw_queue_to_mac80211);
+		mvm->hw_queue_to_mac80211[queue]);
 
 	/* If the queue is still enabled - nothing left to do in this func */
 	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
@@ -818,39 +834,30 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	/* Make sure queue info is correct even though we overwrite it */
 	WARN(mvm->queue_info[queue].hw_queue_refcount ||
 	     mvm->queue_info[queue].tid_bitmap ||
-	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     mvm->hw_queue_to_mac80211[queue],
 	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
 	     queue, mvm->queue_info[queue].hw_queue_refcount,
-	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     mvm->hw_queue_to_mac80211[queue],
 	     mvm->queue_info[queue].tid_bitmap);
 
 	/* If we are here - the queue is freed and we can zero out these vals */
 	mvm->queue_info[queue].hw_queue_refcount = 0;
 	mvm->queue_info[queue].tid_bitmap = 0;
-	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+	mvm->hw_queue_to_mac80211[queue] = 0;
 
 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
 	mvm->queue_info[queue].reserved = false;
 
 	spin_unlock_bh(&mvm->queue_info_lock);
 
-	if (iwl_mvm_has_new_tx_api(mvm)) {
-		iwl_trans_txq_free(mvm->trans, queue);
-	} else {
-		int ret;
-
-		iwl_trans_txq_disable(mvm->trans, queue, false);
-		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
-					   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
-
-		if (ret)
-			IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
-				queue, ret);
-		return ret;
-	}
-
-	return 0;
+	iwl_trans_txq_disable(mvm->trans, queue, false);
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+				   sizeof(struct iwl_scd_txq_cfg_cmd),
+				   &cmd);
+
+	if (ret)
+		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+			queue, ret);
+	return ret;
 }
 
 /**
@@ -1062,6 +1069,35 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
 	return bss_iter_data.vif;
 }
 
+struct iwl_sta_iter_data {
+	bool assoc;
+};
+
+static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_sta_iter_data *data = _data;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (vif->bss_conf.assoc)
+		data->assoc = true;
+}
+
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
+{
+	struct iwl_sta_iter_data data = {
+		.assoc = false,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_sta_iface_iterator,
+						   &data);
+	return data.assoc;
+}
+
 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif,
 				    bool tdls, bool cmd_q)
@@ -1173,8 +1209,8 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
 
-		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
-		mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
+		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
 		mvm->queue_info[queue].hw_queue_refcount--;
 		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 		mvmsta->tid_data[tid].is_tid_active = false;
@@ -1194,7 +1230,7 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
 	 */
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		mvm->queue_info[queue].hw_queue_to_mac80211 |=
+		mvm->hw_queue_to_mac80211[queue] |=
 			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
 	}
 
@@ -251,6 +251,10 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 
 	iwl_enable_interrupts(trans);
 
+	/* Configure debug, if exists */
+	if (trans->dbg_dest_tlv)
+		iwl_pcie_apply_destination(trans);
+
 	/* kick FW self load */
 	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
 	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
@@ -414,9 +414,9 @@ struct iwl_trans_pcie {
 	struct iwl_dma_ptr kw;
 
 	struct iwl_txq *txq_memory;
-	struct iwl_txq *txq[IWL_MAX_HW_QUEUES];
-	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
-	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
 
 	/* PCI bus related data */
 	struct pci_dev *pci_dev;
@@ -778,6 +778,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
 			   struct iwl_dma_ptr *ptr, size_t size);
 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
+void iwl_pcie_apply_destination(struct iwl_trans *trans);
 
 /* transport gen 2 exported functions */
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
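The three queue-tracking arrays above grow because the a000's TVQM addresses many more TX queues than the legacy scheduler; the same growth is why txq_id in iwl_mvm_tx_mpdu() widened from u8 to u16 earlier in this series, since a queue id can now exceed 255. A sketch of the two bounds, with values taken from the iwlwifi headers of this era rather than from this excerpt, so treat them as assumptions:

	/* Illustrative values, not quoted from this diff */
	#define IWL_MAX_HW_QUEUES	32	/* legacy SCD hardware queues */
	#define IWL_MAX_TVQM_QUEUES	512	/* a000 TVQM queues: no longer
						 * representable in a u8 */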
@@ -1147,7 +1147,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
 	 * but apparently a few don't get set; catch them here. */
 	reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
-	if (reclaim) {
+	if (reclaim && !pkt->hdr.group_id) {
 		int i;
 
 		for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
@@ -831,7 +831,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 	return 0;
 }
 
-static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+void iwl_pcie_apply_destination(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
@@ -2833,7 +2833,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 	.ref = iwl_trans_pcie_ref,					\
 	.unref = iwl_trans_pcie_unref,					\
 	.dump_data = iwl_trans_pcie_dump_data,				\
-	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,		\
+	.wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty,		\
 	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
 	.d3_resume = iwl_trans_pcie_d3_resume
@@ -2976,7 +2976,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
-	trans->dev = &pdev->dev;
 	trans_pcie->pci_dev = pdev;
 	iwl_disable_interrupts(trans);
 
@@ -2283,7 +2283,7 @@ int orinoco_if_add(struct orinoco_private *priv,
 	priv->ndev = dev;
 
 	/* Report what we've done */
-	dev_dbg(priv->dev, "Registerred interface %s.\n", dev->name);
+	dev_dbg(priv->dev, "Registered interface %s.\n", dev->name);
 
 	return 0;
@@ -769,18 +769,31 @@ static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
 
 static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
 {
-	u8 res_val = reset;	/* avoid argument promotion */
+	int ret;
+	u8 *res_val = NULL;
 
 	if (!upriv->udev) {
 		err("%s: !upriv->udev", __func__);
 		return -EFAULT;
 	}
-	return usb_control_msg(upriv->udev,
+
+	res_val = kmalloc(sizeof(*res_val), GFP_KERNEL);
+
+	if (!res_val)
+		return -ENOMEM;
+
+	*res_val = reset;	/* avoid argument promotion */
+
+	ret = usb_control_msg(upriv->udev,
 			       usb_sndctrlpipe(upriv->udev, 0),
 			       EZUSB_REQUEST_FW_TRANS,
 			       USB_TYPE_VENDOR | USB_RECIP_DEVICE |
-			       USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
-			       sizeof(res_val), DEF_TIMEOUT);
+			       USB_DIR_OUT, EZUSB_CPUCS_REG, 0, res_val,
+			       sizeof(*res_val), DEF_TIMEOUT);
+
+	kfree(res_val);
+
+	return ret;
 }
 
 static int ezusb_firmware_download(struct ezusb_priv *upriv,
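The ezusb change above fixes a classic USB pitfall: the data stage of usb_control_msg() may be mapped for DMA, so it must not live on the stack. A minimal stand-alone sketch of the same pattern; the device, request and register arguments here are placeholders, not the orinoco_usb ones:

	static int send_one_byte_ctrl(struct usb_device *udev, u8 request,
				      u16 reg, u8 value)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-safe buffer */
		if (!buf)
			return -ENOMEM;
		*buf = value;

		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
				      USB_DIR_OUT, reg, 0, buf, sizeof(*buf),
				      1000 /* ms timeout */);

		kfree(buf);
		return ret;
	}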
@@ -1181,6 +1181,10 @@ static int if_spi_probe(struct spi_device *spi)
 
 	/* Initialize interrupt handling stuff. */
 	card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
+	if (!card->workqueue) {
+		err = -ENOMEM;
+		goto remove_card;
+	}
 	INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
 	INIT_WORK(&card->resume_work, if_spi_resume_worker);
 
@@ -1209,6 +1213,7 @@ release_irq:
 	free_irq(spi->irq, card);
 terminate_workqueue:
 	destroy_workqueue(card->workqueue);
+remove_card:
 	lbs_remove_card(priv); /* will call free_netdev */
 free_card:
 	free_if_spi_card(card);
@@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
 	WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
 
 	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
-	skb_reserve(skb, MT_DMA_HDR_LEN);
-	memcpy(skb_put(skb, len), data, len);
+	if (skb) {
+		skb_reserve(skb, MT_DMA_HDR_LEN);
+		memcpy(skb_put(skb, len), data, len);
+	}
 
 	return skb;
 }
@@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
 	};
 
 	skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
 	return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
 }
 
@@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
 	};
 
 	skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+	if (!skb)
+		return -ENOMEM;
 	return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
 }
 
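The mcu fix works by letting the allocator return NULL and making the callers shown here translate that into -ENOMEM before the skb is dereferenced. A minimal sketch of the contract, assuming a generic 16-byte DMA header rather than mt7601u's MT_DMA_HDR_LEN:

	static struct sk_buff *msg_alloc(const void *data, int len)
	{
		struct sk_buff *skb = alloc_skb(len + 16, GFP_KERNEL);

		if (skb) {
			skb_reserve(skb, 16);		/* headroom for the header */
			memcpy(skb_put(skb, len), data, len);
		}
		return skb;				/* caller must check for NULL */
	}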
@@ -358,6 +358,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
 	return rtl8821ae_phy_rf6052_config(hw);
 }
 
+static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 4:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77777777);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+			break;
+		}
+	case 0:
+	case 2:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	}
+}
+
+static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 0:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		} else {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		}
+		break;
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 2:
+	case 4:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	}
+}
+
 u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
 			   u8 rf_path)
 {
@@ -552,14 +653,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
 		/* 0x82C[1:0] = 2b'00 */
 		rtl_set_bbreg(hw, 0x82c, 0x3, 0);
 	}
-	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-			      0x77777777);
-		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-			      0x77777777);
-		rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
-		rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
-	}
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		_rtl8812ae_phy_set_rfe_reg_24g(hw);
 
 	rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
 	rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -614,14 +710,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
 		/* 0x82C[1:0] = 2'b00 */
 		rtl_set_bbreg(hw, 0x82c, 0x3, 1);
 
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
-		}
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_5g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
@@ -2424,6 +2424,7 @@
 #define	BMASKH4BITS			0xf0000000
 #define BMASKOFDM_D			0xffc00000
 #define	BMASKCCK			0x3f3f3f3f
+#define BMASKRFEINV			0x3ff00000
 
 #define BRFREGOFFSETMASK		0xfffff
 
@@ -3428,6 +3428,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
 
 	/* because rndis_command() sleeps we need to use workqueue */
 	priv->workqueue = create_singlethread_workqueue("rndis_wlan");
+	if (!priv->workqueue) {
+		wiphy_free(wiphy);
+		return -ENOMEM;
+	}
 	INIT_WORK(&priv->work, rndis_wlan_worker);
 	INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
 	INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);