Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem
Conflicts: drivers/net/ethernet/broadcom/Kconfig
This commit is contained in:
Коммит
89c2af3c14
|
@ -4360,7 +4360,7 @@ F: drivers/net/wireless/iwlegacy/
|
|||
|
||||
INTEL WIRELESS WIFI LINK (iwlwifi)
|
||||
M: Johannes Berg <johannes.berg@intel.com>
|
||||
M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
|
||||
M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
|
||||
M: Intel Linux Wireless <ilw@linux.intel.com>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
W: http://intellinuxwireless.org
|
||||
|
|
|
@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE
|
|||
PCI core hostmode operation (external PCI bus).
|
||||
|
||||
config BCMA_HOST_SOC
|
||||
bool
|
||||
depends on BCMA_DRIVER_MIPS
|
||||
bool "Support for BCMA in a SoC"
|
||||
depends on BCMA
|
||||
help
|
||||
Host interface for a Broadcom AIX bus directly mapped into
|
||||
the memory. This only works with the Broadcom SoCs from the
|
||||
BCM47XX line.
|
||||
|
||||
If unsure, say N
|
||||
|
||||
config BCMA_DRIVER_MIPS
|
||||
bool "BCMA Broadcom MIPS core driver"
|
||||
|
|
|
@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus)
|
|||
err = bcma_bus_scan(bus);
|
||||
if (err) {
|
||||
bcma_err(bus, "Failed to scan: %d\n", err);
|
||||
return -1;
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Early init CC core */
|
||||
|
|
|
@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
|
|||
{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
|
||||
{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
|
||||
{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
|
||||
{ BCMA_CORE_PCIEG2, "PCIe Gen 2" },
|
||||
{ BCMA_CORE_DMA, "DMA" },
|
||||
{ BCMA_CORE_SDIO3, "SDIO3" },
|
||||
{ BCMA_CORE_USB20, "USB 2.0" },
|
||||
{ BCMA_CORE_USB30, "USB 3.0" },
|
||||
{ BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
|
||||
{ BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
|
||||
{ BCMA_CORE_ROM, "ROM" },
|
||||
{ BCMA_CORE_NAND, "NAND flash controller" },
|
||||
{ BCMA_CORE_QSPI, "SPI flash controller" },
|
||||
{ BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
|
||||
{ BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
|
||||
{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
|
||||
{ BCMA_CORE_ALTA, "ALTA (I2S)" },
|
||||
{ BCMA_CORE_INVALID, "Invalid" },
|
||||
|
@ -201,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
|
|||
return ent;
|
||||
}
|
||||
|
||||
static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
|
||||
static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
|
||||
u32 type, u8 port)
|
||||
{
|
||||
u32 addrl, addrh, sizel, sizeh = 0;
|
||||
|
@ -213,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
((ent & SCAN_ADDR_TYPE) != type) ||
|
||||
(((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
|
||||
bcma_erom_push_ent(eromptr);
|
||||
return -EINVAL;
|
||||
return (u32)-EINVAL;
|
||||
}
|
||||
|
||||
addrl = ent & SCAN_ADDR_ADDR;
|
||||
|
@ -261,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
struct bcma_device_id *match, int core_num,
|
||||
struct bcma_device *core)
|
||||
{
|
||||
s32 tmp;
|
||||
u32 tmp;
|
||||
u8 i, j;
|
||||
s32 cia, cib;
|
||||
u8 ports[2], wrappers[2];
|
||||
|
@ -339,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
* the main register space for the core
|
||||
*/
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
|
||||
if (tmp <= 0) {
|
||||
if (tmp == 0 || IS_ERR_VALUE(tmp)) {
|
||||
/* Try again to see if it is a bridge */
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_BRIDGE, 0);
|
||||
if (tmp <= 0) {
|
||||
if (tmp == 0 || IS_ERR_VALUE(tmp)) {
|
||||
return -EILSEQ;
|
||||
} else {
|
||||
bcma_info(bus, "Bridge found\n");
|
||||
|
@ -357,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
for (j = 0; ; j++) {
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_SLAVE, i);
|
||||
if (tmp < 0) {
|
||||
if (IS_ERR_VALUE(tmp)) {
|
||||
/* no more entries for port _i_ */
|
||||
/* pr_debug("erom: slave port %d "
|
||||
* "has %d descriptors\n", i, j); */
|
||||
|
@ -374,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
for (j = 0; ; j++) {
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_MWRAP, i);
|
||||
if (tmp < 0) {
|
||||
if (IS_ERR_VALUE(tmp)) {
|
||||
/* no more entries for port _i_ */
|
||||
/* pr_debug("erom: master wrapper %d "
|
||||
* "has %d descriptors\n", i, j); */
|
||||
|
@ -392,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
|
|||
for (j = 0; ; j++) {
|
||||
tmp = bcma_erom_get_addr_desc(bus, eromptr,
|
||||
SCAN_ADDR_TYPE_SWRAP, i + hack);
|
||||
if (tmp < 0) {
|
||||
if (IS_ERR_VALUE(tmp)) {
|
||||
/* no more entries for port _i_ */
|
||||
/* pr_debug("erom: master wrapper %d "
|
||||
* has %d descriptors\n", i, j); */
|
||||
|
|
|
@ -130,7 +130,7 @@ config BNX2X_SRIOV
|
|||
|
||||
config BGMAC
|
||||
tristate "BCMA bus GBit core support"
|
||||
depends on BCMA_HOST_SOC && HAS_DMA
|
||||
depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
|
||||
select PHYLIB
|
||||
---help---
|
||||
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
|
||||
|
|
|
@ -159,7 +159,7 @@ struct ath_common {
|
|||
|
||||
bool btcoex_enabled;
|
||||
bool disable_ani;
|
||||
bool antenna_diversity;
|
||||
bool bt_ant_diversity;
|
||||
};
|
||||
|
||||
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
|
||||
|
|
|
@ -20,6 +20,12 @@
|
|||
#include "debug.h"
|
||||
#include "htc.h"
|
||||
|
||||
void ath10k_bmi_start(struct ath10k *ar)
|
||||
{
|
||||
ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
|
||||
ar->bmi.done_sent = false;
|
||||
}
|
||||
|
||||
int ath10k_bmi_done(struct ath10k *ar)
|
||||
{
|
||||
struct bmi_cmd cmd;
|
||||
|
@ -105,7 +111,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
|
|||
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
|
||||
&resp, &rxlen);
|
||||
if (ret) {
|
||||
ath10k_warn("unable to read from the device\n");
|
||||
ath10k_warn("unable to read from the device (%d)\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -149,7 +156,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
|
|||
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
|
||||
NULL, NULL);
|
||||
if (ret) {
|
||||
ath10k_warn("unable to write to the device\n");
|
||||
ath10k_warn("unable to write to the device (%d)\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -184,6 +184,7 @@ struct bmi_target_info {
|
|||
#define BMI_CE_NUM_TO_TARG 0
|
||||
#define BMI_CE_NUM_TO_HOST 1
|
||||
|
||||
void ath10k_bmi_start(struct ath10k *ar);
|
||||
int ath10k_bmi_done(struct ath10k *ar);
|
||||
int ath10k_bmi_get_target_info(struct ath10k *ar,
|
||||
struct bmi_target_info *target_info);
|
||||
|
|
|
@ -79,7 +79,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
|
|||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
void __iomem *indicator_addr;
|
||||
|
||||
if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
|
||||
if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -100,7 +100,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
|
|||
goto conn_fail;
|
||||
|
||||
/* Start HTC */
|
||||
status = ath10k_htc_start(ar->htc);
|
||||
status = ath10k_htc_start(&ar->htc);
|
||||
if (status)
|
||||
goto conn_fail;
|
||||
|
||||
|
@ -116,7 +116,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
|
|||
return 0;
|
||||
|
||||
timeout:
|
||||
ath10k_htc_stop(ar->htc);
|
||||
ath10k_htc_stop(&ar->htc);
|
||||
conn_fail:
|
||||
return status;
|
||||
}
|
||||
|
@ -247,19 +247,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
|
|||
|
||||
static int ath10k_download_board_data(struct ath10k *ar)
|
||||
{
|
||||
const struct firmware *fw = ar->board_data;
|
||||
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
|
||||
u32 address;
|
||||
const struct firmware *fw;
|
||||
int ret;
|
||||
|
||||
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.board);
|
||||
if (IS_ERR(fw)) {
|
||||
ath10k_err("could not fetch board data fw file (%ld)\n",
|
||||
PTR_ERR(fw));
|
||||
return PTR_ERR(fw);
|
||||
}
|
||||
|
||||
ret = ath10k_push_board_ext_data(ar, fw);
|
||||
if (ret) {
|
||||
ath10k_err("could not push board ext data (%d)\n", ret);
|
||||
|
@ -286,32 +278,20 @@ static int ath10k_download_board_data(struct ath10k *ar)
|
|||
}
|
||||
|
||||
exit:
|
||||
release_firmware(fw);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_download_and_run_otp(struct ath10k *ar)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
u32 address;
|
||||
const struct firmware *fw = ar->otp;
|
||||
u32 address = ar->hw_params.patch_load_addr;
|
||||
u32 exec_param;
|
||||
int ret;
|
||||
|
||||
/* OTP is optional */
|
||||
|
||||
if (ar->hw_params.fw.otp == NULL) {
|
||||
ath10k_info("otp file not defined\n");
|
||||
if (!ar->otp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
address = ar->hw_params.patch_load_addr;
|
||||
|
||||
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.otp);
|
||||
if (IS_ERR(fw)) {
|
||||
ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
|
||||
if (ret) {
|
||||
|
@ -327,28 +307,17 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
|
|||
}
|
||||
|
||||
exit:
|
||||
release_firmware(fw);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_download_fw(struct ath10k *ar)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const struct firmware *fw = ar->firmware;
|
||||
u32 address;
|
||||
int ret;
|
||||
|
||||
if (ar->hw_params.fw.fw == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
address = ar->hw_params.patch_load_addr;
|
||||
|
||||
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.fw);
|
||||
if (IS_ERR(fw)) {
|
||||
ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
|
||||
return PTR_ERR(fw);
|
||||
}
|
||||
|
||||
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
|
||||
if (ret) {
|
||||
ath10k_err("could not write fw (%d)\n", ret);
|
||||
|
@ -356,7 +325,74 @@ static int ath10k_download_fw(struct ath10k *ar)
|
|||
}
|
||||
|
||||
exit:
|
||||
release_firmware(fw);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ath10k_core_free_firmware_files(struct ath10k *ar)
|
||||
{
|
||||
if (ar->board_data && !IS_ERR(ar->board_data))
|
||||
release_firmware(ar->board_data);
|
||||
|
||||
if (ar->otp && !IS_ERR(ar->otp))
|
||||
release_firmware(ar->otp);
|
||||
|
||||
if (ar->firmware && !IS_ERR(ar->firmware))
|
||||
release_firmware(ar->firmware);
|
||||
|
||||
ar->board_data = NULL;
|
||||
ar->otp = NULL;
|
||||
ar->firmware = NULL;
|
||||
}
|
||||
|
||||
static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (ar->hw_params.fw.fw == NULL) {
|
||||
ath10k_err("firmware file not defined\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ar->hw_params.fw.board == NULL) {
|
||||
ath10k_err("board data file not defined");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ar->board_data = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.board);
|
||||
if (IS_ERR(ar->board_data)) {
|
||||
ret = PTR_ERR(ar->board_data);
|
||||
ath10k_err("could not fetch board data (%d)\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
ar->firmware = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.fw);
|
||||
if (IS_ERR(ar->firmware)) {
|
||||
ret = PTR_ERR(ar->firmware);
|
||||
ath10k_err("could not fetch firmware (%d)\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* OTP may be undefined. If so, don't fetch it at all */
|
||||
if (ar->hw_params.fw.otp == NULL)
|
||||
return 0;
|
||||
|
||||
ar->otp = ath10k_fetch_fw_file(ar,
|
||||
ar->hw_params.fw.dir,
|
||||
ar->hw_params.fw.otp);
|
||||
if (IS_ERR(ar->otp)) {
|
||||
ret = PTR_ERR(ar->otp);
|
||||
ath10k_err("could not fetch otp (%d)\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
ath10k_core_free_firmware_files(ar);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -440,8 +476,35 @@ static int ath10k_init_hw_params(struct ath10k *ar)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void ath10k_core_restart(struct work_struct *work)
|
||||
{
|
||||
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
switch (ar->state) {
|
||||
case ATH10K_STATE_ON:
|
||||
ath10k_halt(ar);
|
||||
ar->state = ATH10K_STATE_RESTARTING;
|
||||
ieee80211_restart_hw(ar->hw);
|
||||
break;
|
||||
case ATH10K_STATE_OFF:
|
||||
/* this can happen if driver is being unloaded */
|
||||
ath10k_warn("cannot restart a device that hasn't been started\n");
|
||||
break;
|
||||
case ATH10K_STATE_RESTARTING:
|
||||
case ATH10K_STATE_RESTARTED:
|
||||
ar->state = ATH10K_STATE_WEDGED;
|
||||
/* fall through */
|
||||
case ATH10K_STATE_WEDGED:
|
||||
ath10k_warn("device is wedged, will not restart\n");
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
}
|
||||
|
||||
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
|
||||
enum ath10k_bus bus,
|
||||
const struct ath10k_hif_ops *hif_ops)
|
||||
{
|
||||
struct ath10k *ar;
|
||||
|
@ -458,9 +521,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
|
|||
|
||||
ar->hif.priv = hif_priv;
|
||||
ar->hif.ops = hif_ops;
|
||||
ar->hif.bus = bus;
|
||||
|
||||
ar->free_vdev_map = 0xFF; /* 8 vdevs */
|
||||
|
||||
init_completion(&ar->scan.started);
|
||||
init_completion(&ar->scan.completed);
|
||||
|
@ -487,6 +547,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
|
|||
|
||||
init_waitqueue_head(&ar->event_queue);
|
||||
|
||||
INIT_WORK(&ar->restart_work, ath10k_core_restart);
|
||||
|
||||
return ar;
|
||||
|
||||
err_wq:
|
||||
|
@ -504,24 +566,11 @@ void ath10k_core_destroy(struct ath10k *ar)
|
|||
}
|
||||
EXPORT_SYMBOL(ath10k_core_destroy);
|
||||
|
||||
|
||||
int ath10k_core_register(struct ath10k *ar)
|
||||
int ath10k_core_start(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_htc_ops htc_ops;
|
||||
struct bmi_target_info target_info;
|
||||
int status;
|
||||
|
||||
memset(&target_info, 0, sizeof(target_info));
|
||||
status = ath10k_bmi_get_target_info(ar, &target_info);
|
||||
if (status)
|
||||
goto err;
|
||||
|
||||
ar->target_version = target_info.version;
|
||||
ar->hw->wiphy->hw_version = target_info.version;
|
||||
|
||||
status = ath10k_init_hw_params(ar);
|
||||
if (status)
|
||||
goto err;
|
||||
ath10k_bmi_start(ar);
|
||||
|
||||
if (ath10k_init_configure_target(ar)) {
|
||||
status = -EINVAL;
|
||||
|
@ -536,32 +585,32 @@ int ath10k_core_register(struct ath10k *ar)
|
|||
if (status)
|
||||
goto err;
|
||||
|
||||
htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
|
||||
ar->htc.htc_ops.target_send_suspend_complete =
|
||||
ath10k_send_suspend_complete;
|
||||
|
||||
ar->htc = ath10k_htc_create(ar, &htc_ops);
|
||||
if (IS_ERR(ar->htc)) {
|
||||
status = PTR_ERR(ar->htc);
|
||||
ath10k_err("could not create HTC (%d)\n", status);
|
||||
status = ath10k_htc_init(ar);
|
||||
if (status) {
|
||||
ath10k_err("could not init HTC (%d)\n", status);
|
||||
goto err;
|
||||
}
|
||||
|
||||
status = ath10k_bmi_done(ar);
|
||||
if (status)
|
||||
goto err_htc_destroy;
|
||||
goto err;
|
||||
|
||||
status = ath10k_wmi_attach(ar);
|
||||
if (status) {
|
||||
ath10k_err("WMI attach failed: %d\n", status);
|
||||
goto err_htc_destroy;
|
||||
goto err;
|
||||
}
|
||||
|
||||
status = ath10k_htc_wait_target(ar->htc);
|
||||
status = ath10k_htc_wait_target(&ar->htc);
|
||||
if (status)
|
||||
goto err_wmi_detach;
|
||||
|
||||
ar->htt = ath10k_htt_attach(ar);
|
||||
if (!ar->htt) {
|
||||
status = -ENOMEM;
|
||||
status = ath10k_htt_attach(ar);
|
||||
if (status) {
|
||||
ath10k_err("could not attach htt (%d)\n", status);
|
||||
goto err_wmi_detach;
|
||||
}
|
||||
|
||||
|
@ -588,13 +637,101 @@ int ath10k_core_register(struct ath10k *ar)
|
|||
goto err_disconnect_htc;
|
||||
}
|
||||
|
||||
status = ath10k_htt_attach_target(ar->htt);
|
||||
status = ath10k_htt_attach_target(&ar->htt);
|
||||
if (status)
|
||||
goto err_disconnect_htc;
|
||||
|
||||
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
|
||||
|
||||
return 0;
|
||||
|
||||
err_disconnect_htc:
|
||||
ath10k_htc_stop(&ar->htc);
|
||||
err_htt_detach:
|
||||
ath10k_htt_detach(&ar->htt);
|
||||
err_wmi_detach:
|
||||
ath10k_wmi_detach(ar);
|
||||
err:
|
||||
return status;
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_core_start);
|
||||
|
||||
void ath10k_core_stop(struct ath10k *ar)
|
||||
{
|
||||
ath10k_htc_stop(&ar->htc);
|
||||
ath10k_htt_detach(&ar->htt);
|
||||
ath10k_wmi_detach(ar);
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_core_stop);
|
||||
|
||||
/* mac80211 manages fw/hw initialization through start/stop hooks. However in
|
||||
* order to know what hw capabilities should be advertised to mac80211 it is
|
||||
* necessary to load the firmware (and tear it down immediately since start
|
||||
* hook will try to init it again) before registering */
|
||||
static int ath10k_core_probe_fw(struct ath10k *ar)
|
||||
{
|
||||
struct bmi_target_info target_info;
|
||||
int ret = 0;
|
||||
|
||||
ret = ath10k_hif_power_up(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not start pci hif (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
memset(&target_info, 0, sizeof(target_info));
|
||||
ret = ath10k_bmi_get_target_info(ar, &target_info);
|
||||
if (ret) {
|
||||
ath10k_err("could not get target info (%d)\n", ret);
|
||||
ath10k_hif_power_down(ar);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ar->target_version = target_info.version;
|
||||
ar->hw->wiphy->hw_version = target_info.version;
|
||||
|
||||
ret = ath10k_init_hw_params(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not get hw params (%d)\n", ret);
|
||||
ath10k_hif_power_down(ar);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_core_fetch_firmware_files(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not fetch firmware files (%d)\n", ret);
|
||||
ath10k_hif_power_down(ar);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_core_start(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not init core (%d)\n", ret);
|
||||
ath10k_core_free_firmware_files(ar);
|
||||
ath10k_hif_power_down(ar);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ath10k_core_stop(ar);
|
||||
ath10k_hif_power_down(ar);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_core_register(struct ath10k *ar)
|
||||
{
|
||||
int status;
|
||||
|
||||
status = ath10k_core_probe_fw(ar);
|
||||
if (status) {
|
||||
ath10k_err("could not probe fw (%d)\n", status);
|
||||
return status;
|
||||
}
|
||||
|
||||
status = ath10k_mac_register(ar);
|
||||
if (status)
|
||||
goto err_disconnect_htc;
|
||||
if (status) {
|
||||
ath10k_err("could not register to mac80211 (%d)\n", status);
|
||||
goto err_release_fw;
|
||||
}
|
||||
|
||||
status = ath10k_debug_create(ar);
|
||||
if (status) {
|
||||
|
@ -606,15 +743,8 @@ int ath10k_core_register(struct ath10k *ar)
|
|||
|
||||
err_unregister_mac:
|
||||
ath10k_mac_unregister(ar);
|
||||
err_disconnect_htc:
|
||||
ath10k_htc_stop(ar->htc);
|
||||
err_htt_detach:
|
||||
ath10k_htt_detach(ar->htt);
|
||||
err_wmi_detach:
|
||||
ath10k_wmi_detach(ar);
|
||||
err_htc_destroy:
|
||||
ath10k_htc_destroy(ar->htc);
|
||||
err:
|
||||
err_release_fw:
|
||||
ath10k_core_free_firmware_files(ar);
|
||||
return status;
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_core_register);
|
||||
|
@ -625,41 +755,10 @@ void ath10k_core_unregister(struct ath10k *ar)
|
|||
* Otherwise we will fail to submit commands to FW and mac80211 will be
|
||||
* unhappy about callback failures. */
|
||||
ath10k_mac_unregister(ar);
|
||||
ath10k_htc_stop(ar->htc);
|
||||
ath10k_htt_detach(ar->htt);
|
||||
ath10k_wmi_detach(ar);
|
||||
ath10k_htc_destroy(ar->htc);
|
||||
ath10k_core_free_firmware_files(ar);
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_core_unregister);
|
||||
|
||||
int ath10k_core_target_suspend(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
|
||||
|
||||
ret = ath10k_wmi_pdev_suspend_target(ar);
|
||||
if (ret)
|
||||
ath10k_warn("could not suspend target (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_core_target_suspend);
|
||||
|
||||
int ath10k_core_target_resume(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
|
||||
|
||||
ret = ath10k_wmi_pdev_resume_target(ar);
|
||||
if (ret)
|
||||
ath10k_warn("could not resume target (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_core_target_resume);
|
||||
|
||||
MODULE_AUTHOR("Qualcomm Atheros");
|
||||
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "htt.h"
|
||||
#include "htc.h"
|
||||
#include "hw.h"
|
||||
#include "targaddrs.h"
|
||||
|
@ -43,10 +44,6 @@
|
|||
|
||||
struct ath10k;
|
||||
|
||||
enum ath10k_bus {
|
||||
ATH10K_BUS_PCI,
|
||||
};
|
||||
|
||||
struct ath10k_skb_cb {
|
||||
dma_addr_t paddr;
|
||||
bool is_mapped;
|
||||
|
@ -250,6 +247,28 @@ struct ath10k_debug {
|
|||
struct completion event_stats_compl;
|
||||
};
|
||||
|
||||
enum ath10k_state {
|
||||
ATH10K_STATE_OFF = 0,
|
||||
ATH10K_STATE_ON,
|
||||
|
||||
/* When doing firmware recovery the device is first powered down.
|
||||
* mac80211 is supposed to call in to start() hook later on. It is
|
||||
* however possible that driver unloading and firmware crash overlap.
|
||||
* mac80211 can wait on conf_mutex in stop() while the device is
|
||||
* stopped in ath10k_core_restart() work holding conf_mutex. The state
|
||||
* RESTARTED means that the device is up and mac80211 has started hw
|
||||
* reconfiguration. Once mac80211 is done with the reconfiguration we
|
||||
* set the state to STATE_ON in restart_complete(). */
|
||||
ATH10K_STATE_RESTARTING,
|
||||
ATH10K_STATE_RESTARTED,
|
||||
|
||||
/* The device has crashed while restarting hw. This state is like ON
|
||||
* but commands are blocked in HTC and -ECOMM response is given. This
|
||||
* prevents completion timeouts and makes the driver more responsive to
|
||||
* userspace commands. This is also prevents recursive recovery. */
|
||||
ATH10K_STATE_WEDGED,
|
||||
};
|
||||
|
||||
struct ath10k {
|
||||
struct ath_common ath_common;
|
||||
struct ieee80211_hw *hw;
|
||||
|
@ -274,19 +293,16 @@ struct ath10k {
|
|||
|
||||
struct {
|
||||
void *priv;
|
||||
enum ath10k_bus bus;
|
||||
const struct ath10k_hif_ops *ops;
|
||||
} hif;
|
||||
|
||||
struct ath10k_wmi wmi;
|
||||
|
||||
wait_queue_head_t event_queue;
|
||||
bool is_target_paused;
|
||||
|
||||
struct ath10k_bmi bmi;
|
||||
|
||||
struct ath10k_htc *htc;
|
||||
struct ath10k_htt *htt;
|
||||
struct ath10k_wmi wmi;
|
||||
struct ath10k_htc htc;
|
||||
struct ath10k_htt htt;
|
||||
|
||||
struct ath10k_hw_params {
|
||||
u32 id;
|
||||
|
@ -301,6 +317,10 @@ struct ath10k {
|
|||
} fw;
|
||||
} hw_params;
|
||||
|
||||
const struct firmware *board_data;
|
||||
const struct firmware *otp;
|
||||
const struct firmware *firmware;
|
||||
|
||||
struct {
|
||||
struct completion started;
|
||||
struct completion completed;
|
||||
|
@ -350,20 +370,22 @@ struct ath10k {
|
|||
struct completion offchan_tx_completed;
|
||||
struct sk_buff *offchan_tx_skb;
|
||||
|
||||
enum ath10k_state state;
|
||||
|
||||
struct work_struct restart_work;
|
||||
|
||||
#ifdef CONFIG_ATH10K_DEBUGFS
|
||||
struct ath10k_debug debug;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
|
||||
enum ath10k_bus bus,
|
||||
const struct ath10k_hif_ops *hif_ops);
|
||||
void ath10k_core_destroy(struct ath10k *ar);
|
||||
|
||||
int ath10k_core_start(struct ath10k *ar);
|
||||
void ath10k_core_stop(struct ath10k *ar);
|
||||
int ath10k_core_register(struct ath10k *ar);
|
||||
void ath10k_core_unregister(struct ath10k *ar);
|
||||
|
||||
int ath10k_core_target_suspend(struct ath10k *ar);
|
||||
int ath10k_core_target_resume(struct ath10k *ar);
|
||||
|
||||
#endif /* _CORE_H_ */
|
||||
|
|
|
@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
|
|||
struct wmi_pdev_stats *ps;
|
||||
int i;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
|
||||
stats = &ar->debug.target_stats;
|
||||
|
||||
|
@ -259,6 +259,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
|
|||
}
|
||||
}
|
||||
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
complete(&ar->debug.event_stats_compl);
|
||||
}
|
||||
|
@ -268,35 +269,35 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
|
|||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
struct ath10k_target_stats *fw_stats;
|
||||
char *buf;
|
||||
char *buf = NULL;
|
||||
unsigned int len = 0, buf_len = 2500;
|
||||
ssize_t ret_cnt;
|
||||
ssize_t ret_cnt = 0;
|
||||
long left;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
fw_stats = &ar->debug.target_stats;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
if (ar->state != ATH10K_STATE_ON)
|
||||
goto exit;
|
||||
|
||||
buf = kzalloc(buf_len, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
goto exit;
|
||||
|
||||
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
|
||||
if (ret) {
|
||||
ath10k_warn("could not request stats (%d)\n", ret);
|
||||
kfree(buf);
|
||||
return -EIO;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
|
||||
if (left <= 0)
|
||||
goto exit;
|
||||
|
||||
if (left <= 0) {
|
||||
kfree(buf);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
len += scnprintf(buf + len, buf_len - len, "\n");
|
||||
len += scnprintf(buf + len, buf_len - len, "%30s\n",
|
||||
"ath10k PDEV stats");
|
||||
|
@ -424,14 +425,15 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
|
|||
fw_stats->peer_stat[i].peer_tx_rate);
|
||||
len += scnprintf(buf + len, buf_len - len, "\n");
|
||||
}
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
||||
if (len > buf_len)
|
||||
len = buf_len;
|
||||
|
||||
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
kfree(buf);
|
||||
return ret_cnt;
|
||||
}
|
||||
|
@ -443,6 +445,60 @@ static const struct file_operations fops_fw_stats = {
|
|||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
const char buf[] = "To simulate firmware crash write the keyword"
|
||||
" `crash` to this file.\nThis will force firmware"
|
||||
" to report a crash to the host system.\n";
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
|
||||
}
|
||||
|
||||
static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
char buf[32] = {};
|
||||
int ret;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
||||
if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
|
||||
ret = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (ar->state != ATH10K_STATE_ON &&
|
||||
ar->state != ATH10K_STATE_RESTARTED) {
|
||||
ret = -ENETDOWN;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ath10k_info("simulating firmware crash\n");
|
||||
|
||||
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
|
||||
if (ret)
|
||||
ath10k_warn("failed to force fw hang (%d)\n", ret);
|
||||
|
||||
if (ret == 0)
|
||||
ret = count;
|
||||
|
||||
exit:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_simulate_fw_crash = {
|
||||
.read = ath10k_read_simulate_fw_crash,
|
||||
.write = ath10k_write_simulate_fw_crash,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
int ath10k_debug_create(struct ath10k *ar)
|
||||
{
|
||||
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
|
||||
|
@ -459,6 +515,9 @@ int ath10k_debug_create(struct ath10k *ar)
|
|||
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
|
||||
&fops_wmi_services);
|
||||
|
||||
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
|
||||
ar, &fops_simulate_fw_crash);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_ATH10K_DEBUGFS */
|
||||
|
|
|
@ -46,8 +46,11 @@ struct ath10k_hif_ops {
|
|||
void *request, u32 request_len,
|
||||
void *response, u32 *response_len);
|
||||
|
||||
/* Post BMI phase, after FW is loaded. Starts regular operation */
|
||||
int (*start)(struct ath10k *ar);
|
||||
|
||||
/* Clean up what start() did. This does not revert to BMI phase. If
|
||||
* desired so, call power_down() and power_up() */
|
||||
void (*stop)(struct ath10k *ar);
|
||||
|
||||
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
|
||||
|
@ -66,10 +69,20 @@ struct ath10k_hif_ops {
|
|||
*/
|
||||
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
|
||||
|
||||
void (*init)(struct ath10k *ar,
|
||||
struct ath10k_hif_cb *callbacks);
|
||||
void (*set_callbacks)(struct ath10k *ar,
|
||||
struct ath10k_hif_cb *callbacks);
|
||||
|
||||
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
|
||||
|
||||
/* Power up the device and enter BMI transfer mode for FW download */
|
||||
int (*power_up)(struct ath10k *ar);
|
||||
|
||||
/* Power down the device and free up resources. stop() must be called
|
||||
* before this if start() was called earlier */
|
||||
void (*power_down)(struct ath10k *ar);
|
||||
|
||||
int (*suspend)(struct ath10k *ar);
|
||||
int (*resume)(struct ath10k *ar);
|
||||
};
|
||||
|
||||
|
||||
|
@ -122,10 +135,10 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
|
|||
ar->hif.ops->send_complete_check(ar, pipe_id, force);
|
||||
}
|
||||
|
||||
static inline void ath10k_hif_init(struct ath10k *ar,
|
||||
struct ath10k_hif_cb *callbacks)
|
||||
static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
|
||||
struct ath10k_hif_cb *callbacks)
|
||||
{
|
||||
ar->hif.ops->init(ar, callbacks);
|
||||
ar->hif.ops->set_callbacks(ar, callbacks);
|
||||
}
|
||||
|
||||
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
|
||||
|
@ -134,4 +147,30 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
|
|||
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
|
||||
}
|
||||
|
||||
static inline int ath10k_hif_power_up(struct ath10k *ar)
|
||||
{
|
||||
return ar->hif.ops->power_up(ar);
|
||||
}
|
||||
|
||||
static inline void ath10k_hif_power_down(struct ath10k *ar)
|
||||
{
|
||||
ar->hif.ops->power_down(ar);
|
||||
}
|
||||
|
||||
static inline int ath10k_hif_suspend(struct ath10k *ar)
|
||||
{
|
||||
if (!ar->hif.ops->suspend)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return ar->hif.ops->suspend(ar);
|
||||
}
|
||||
|
||||
static inline int ath10k_hif_resume(struct ath10k *ar)
|
||||
{
|
||||
if (!ar->hif.ops->resume)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return ar->hif.ops->resume(ar);
|
||||
}
|
||||
|
||||
#endif /* _HIF_H_ */
|
||||
|
|
|
@ -246,15 +246,22 @@ int ath10k_htc_send(struct ath10k_htc *htc,
|
|||
{
|
||||
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
|
||||
|
||||
if (htc->ar->state == ATH10K_STATE_WEDGED)
|
||||
return -ECOMM;
|
||||
|
||||
if (eid >= ATH10K_HTC_EP_COUNT) {
|
||||
ath10k_warn("Invalid endpoint id: %d\n", eid);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
skb_push(skb, sizeof(struct ath10k_htc_hdr));
|
||||
|
||||
spin_lock_bh(&htc->tx_lock);
|
||||
if (htc->stopped) {
|
||||
spin_unlock_bh(&htc->tx_lock);
|
||||
return -ESHUTDOWN;
|
||||
}
|
||||
|
||||
__skb_queue_tail(&ep->tx_queue, skb);
|
||||
skb_push(skb, sizeof(struct ath10k_htc_hdr));
|
||||
spin_unlock_bh(&htc->tx_lock);
|
||||
|
||||
queue_work(htc->ar->workqueue, &ep->send_work);
|
||||
|
@ -265,25 +272,19 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
|
|||
struct sk_buff *skb,
|
||||
unsigned int eid)
|
||||
{
|
||||
struct ath10k_htc *htc = ar->htc;
|
||||
struct ath10k_htc *htc = &ar->htc;
|
||||
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
|
||||
bool stopping;
|
||||
|
||||
ath10k_htc_notify_tx_completion(ep, skb);
|
||||
/* the skb now belongs to the completion handler */
|
||||
|
||||
/* note: when using TX credit flow, the re-checking of queues happens
|
||||
* when credits flow back from the target. in the non-TX credit case,
|
||||
* we recheck after the packet completes */
|
||||
spin_lock_bh(&htc->tx_lock);
|
||||
stopping = htc->stopping;
|
||||
spin_unlock_bh(&htc->tx_lock);
|
||||
|
||||
if (!ep->tx_credit_flow_enabled && !stopping)
|
||||
/*
|
||||
* note: when using TX credit flow, the re-checking of
|
||||
* queues happens when credits flow back from the target.
|
||||
* in the non-TX credit case, we recheck after the packet
|
||||
* completes
|
||||
*/
|
||||
if (!ep->tx_credit_flow_enabled && !htc->stopped)
|
||||
queue_work(ar->workqueue, &ep->send_work);
|
||||
spin_unlock_bh(&htc->tx_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -414,7 +415,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
|
|||
u8 pipe_id)
|
||||
{
|
||||
int status = 0;
|
||||
struct ath10k_htc *htc = ar->htc;
|
||||
struct ath10k_htc *htc = &ar->htc;
|
||||
struct ath10k_htc_hdr *hdr;
|
||||
struct ath10k_htc_ep *ep;
|
||||
u16 payload_len;
|
||||
|
@ -751,8 +752,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
|
|||
tx_alloc = ath10k_htc_get_credit_allocation(htc,
|
||||
conn_req->service_id);
|
||||
if (!tx_alloc)
|
||||
ath10k_warn("HTC Service %s does not allocate target credits\n",
|
||||
htc_service_name(conn_req->service_id));
|
||||
ath10k_dbg(ATH10K_DBG_HTC,
|
||||
"HTC Service %s does not allocate target credits\n",
|
||||
htc_service_name(conn_req->service_id));
|
||||
|
||||
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
|
||||
if (!skb) {
|
||||
|
@ -947,7 +949,7 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
|
|||
struct ath10k_htc_ep *ep;
|
||||
|
||||
spin_lock_bh(&htc->tx_lock);
|
||||
htc->stopping = true;
|
||||
htc->stopped = true;
|
||||
spin_unlock_bh(&htc->tx_lock);
|
||||
|
||||
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
|
||||
|
@ -956,26 +958,18 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
|
|||
}
|
||||
|
||||
ath10k_hif_stop(htc->ar);
|
||||
ath10k_htc_reset_endpoint_states(htc);
|
||||
}
|
||||
|
||||
/* registered target arrival callback from the HIF layer */
|
||||
struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
|
||||
struct ath10k_htc_ops *htc_ops)
|
||||
int ath10k_htc_init(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_hif_cb htc_callbacks;
|
||||
struct ath10k_htc_ep *ep = NULL;
|
||||
struct ath10k_htc *htc = NULL;
|
||||
|
||||
/* FIXME: use struct ath10k instead */
|
||||
htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
|
||||
if (!htc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
struct ath10k_htc *htc = &ar->htc;
|
||||
|
||||
spin_lock_init(&htc->tx_lock);
|
||||
|
||||
memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
|
||||
|
||||
htc->stopped = false;
|
||||
ath10k_htc_reset_endpoint_states(htc);
|
||||
|
||||
/* setup HIF layer callbacks */
|
||||
|
@ -986,15 +980,10 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
|
|||
/* Get HIF default pipe for HTC message exchange */
|
||||
ep = &htc->endpoint[ATH10K_HTC_EP_0];
|
||||
|
||||
ath10k_hif_init(ar, &htc_callbacks);
|
||||
ath10k_hif_set_callbacks(ar, &htc_callbacks);
|
||||
ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
|
||||
|
||||
init_completion(&htc->ctl_resp);
|
||||
|
||||
return htc;
|
||||
}
|
||||
|
||||
void ath10k_htc_destroy(struct ath10k_htc *htc)
|
||||
{
|
||||
kfree(htc);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -335,7 +335,7 @@ struct ath10k_htc {
|
|||
struct ath10k *ar;
|
||||
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
|
||||
|
||||
/* protects endpoint and stopping fields */
|
||||
/* protects endpoint and stopped fields */
|
||||
spinlock_t tx_lock;
|
||||
|
||||
struct ath10k_htc_ops htc_ops;
|
||||
|
@ -349,11 +349,10 @@ struct ath10k_htc {
|
|||
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
|
||||
int target_credit_size;
|
||||
|
||||
bool stopping;
|
||||
bool stopped;
|
||||
};
|
||||
|
||||
struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
|
||||
struct ath10k_htc_ops *htc_ops);
|
||||
int ath10k_htc_init(struct ath10k *ar);
|
||||
int ath10k_htc_wait_target(struct ath10k_htc *htc);
|
||||
int ath10k_htc_start(struct ath10k_htc *htc);
|
||||
int ath10k_htc_connect_service(struct ath10k_htc *htc,
|
||||
|
@ -362,7 +361,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
|
|||
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
|
||||
struct sk_buff *packet);
|
||||
void ath10k_htc_stop(struct ath10k_htc *htc);
|
||||
void ath10k_htc_destroy(struct ath10k_htc *htc);
|
||||
struct sk_buff *ath10k_htc_alloc_skb(int size);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/if_ether.h>
|
||||
|
||||
#include "htt.h"
|
||||
#include "core.h"
|
||||
|
@ -36,7 +37,7 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
|
|||
/* connect to control service */
|
||||
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
|
||||
|
||||
status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
|
||||
status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
|
||||
&conn_resp);
|
||||
|
||||
if (status)
|
||||
|
@ -47,15 +48,11 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
|
||||
int ath10k_htt_attach(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_htt *htt;
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
int ret;
|
||||
|
||||
htt = kzalloc(sizeof(*htt), GFP_KERNEL);
|
||||
if (!htt)
|
||||
return NULL;
|
||||
|
||||
htt->ar = ar;
|
||||
htt->max_throughput_mbps = 800;
|
||||
|
||||
|
@ -65,8 +62,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
|
|||
* since ath10k_htt_rx_attach involves sending a rx ring configure
|
||||
* message to the target.
|
||||
*/
|
||||
if (ath10k_htt_htc_attach(htt))
|
||||
ret = ath10k_htt_htc_attach(htt);
|
||||
if (ret) {
|
||||
ath10k_err("could not attach htt htc (%d)\n", ret);
|
||||
goto err_htc_attach;
|
||||
}
|
||||
|
||||
ret = ath10k_htt_tx_attach(htt);
|
||||
if (ret) {
|
||||
|
@ -74,8 +74,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
|
|||
goto err_htc_attach;
|
||||
}
|
||||
|
||||
if (ath10k_htt_rx_attach(htt))
|
||||
ret = ath10k_htt_rx_attach(htt);
|
||||
if (ret) {
|
||||
ath10k_err("could not attach htt rx (%d)\n", ret);
|
||||
goto err_rx_attach;
|
||||
}
|
||||
|
||||
/*
|
||||
* Prefetch enough data to satisfy target
|
||||
|
@ -89,13 +92,12 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
|
|||
8 + /* llc snap */
|
||||
2; /* ip4 dscp or ip6 priority */
|
||||
|
||||
return htt;
|
||||
return 0;
|
||||
|
||||
err_rx_attach:
|
||||
ath10k_htt_tx_detach(htt);
|
||||
err_htc_attach:
|
||||
kfree(htt);
|
||||
return NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
|
||||
|
@ -148,5 +150,4 @@ void ath10k_htt_detach(struct ath10k_htt *htt)
|
|||
{
|
||||
ath10k_htt_rx_detach(htt);
|
||||
ath10k_htt_tx_detach(htt);
|
||||
kfree(htt);
|
||||
}
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
|
||||
#include <linux/bug.h>
|
||||
|
||||
#include "core.h"
|
||||
#include "htc.h"
|
||||
#include "rx_desc.h"
|
||||
|
||||
|
@ -1317,7 +1316,7 @@ struct htt_rx_desc {
|
|||
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
|
||||
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
|
||||
|
||||
struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
|
||||
int ath10k_htt_attach(struct ath10k *ar);
|
||||
int ath10k_htt_attach_target(struct ath10k_htt *htt);
|
||||
void ath10k_htt_detach(struct ath10k_htt *htt);
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "core.h"
|
||||
#include "htc.h"
|
||||
#include "htt.h"
|
||||
#include "txrx.h"
|
||||
|
@ -1036,7 +1037,7 @@ end:
|
|||
|
||||
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_htt *htt = ar->htt;
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct htt_resp *resp = (struct htt_resp *)skb->data;
|
||||
|
||||
/* confirm alignment */
|
||||
|
|
|
@ -92,7 +92,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
|
|||
|
||||
/* At the beginning free queue number should hint us the maximum
|
||||
* queue length */
|
||||
pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
|
||||
pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
|
||||
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
|
||||
pipe);
|
||||
|
||||
|
@ -153,7 +153,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
|
|||
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
|
||||
struct ath10k_htt *htt = ar->htt;
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
|
||||
if (skb_cb->htt.is_conf) {
|
||||
dev_kfree_skb_any(skb);
|
||||
|
@ -194,7 +194,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
|
|||
|
||||
ATH10K_SKB_CB(skb)->htt.is_conf = true;
|
||||
|
||||
ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
|
||||
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
|
||||
if (ret) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return ret;
|
||||
|
@ -281,7 +281,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
|
|||
|
||||
ATH10K_SKB_CB(skb)->htt.is_conf = true;
|
||||
|
||||
ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
|
||||
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
|
||||
if (ret) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return ret;
|
||||
|
@ -346,7 +346,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
|||
skb_cb->htt.refcount = 2;
|
||||
skb_cb->htt.msdu = msdu;
|
||||
|
||||
res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
|
||||
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
|
||||
if (res)
|
||||
goto err;
|
||||
|
||||
|
@ -486,7 +486,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
|||
skb_cb->htt.txfrag = txfrag;
|
||||
skb_cb->htt.msdu = msdu;
|
||||
|
||||
res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
|
||||
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
|
||||
if (res)
|
||||
goto err;
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <net/mac80211.h>
|
||||
#include <linux/etherdevice.h>
|
||||
|
||||
#include "hif.h"
|
||||
#include "core.h"
|
||||
#include "debug.h"
|
||||
#include "wmi.h"
|
||||
|
@ -43,6 +44,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
|
|||
.macaddr = macaddr,
|
||||
};
|
||||
|
||||
lockdep_assert_held(&arvif->ar->conf_mutex);
|
||||
|
||||
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
|
||||
arg.key_flags = WMI_KEY_PAIRWISE;
|
||||
else
|
||||
|
@ -87,6 +90,8 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
|
|||
struct ath10k *ar = arvif->ar;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
INIT_COMPLETION(ar->install_key_done);
|
||||
|
||||
ret = ath10k_send_key(arvif, key, cmd, macaddr);
|
||||
|
@ -327,6 +332,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
|
||||
{
|
||||
if (value != 0xFFFFFFFF)
|
||||
value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
|
||||
ATH10K_RTS_MAX);
|
||||
|
||||
return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
|
||||
WMI_VDEV_PARAM_RTS_THRESHOLD,
|
||||
value);
|
||||
}
|
||||
|
||||
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
|
||||
{
|
||||
if (value != 0xFFFFFFFF)
|
||||
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
|
||||
ATH10K_FRAGMT_THRESHOLD_MIN,
|
||||
ATH10K_FRAGMT_THRESHOLD_MAX);
|
||||
|
||||
return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
|
||||
WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
|
||||
value);
|
||||
}
|
||||
|
||||
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
|
||||
{
|
||||
int ret;
|
||||
|
@ -364,6 +392,20 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
|
|||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
static void ath10k_peer_cleanup_all(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_peer *peer, *tmp;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
|
||||
list_del(&peer->list);
|
||||
kfree(peer);
|
||||
}
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
/************************/
|
||||
/* Interface management */
|
||||
/************************/
|
||||
|
@ -372,6 +414,8 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
|
|||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
ret = wait_for_completion_timeout(&ar->vdev_setup_done,
|
||||
ATH10K_VDEV_SETUP_TIMEOUT_HZ);
|
||||
if (ret == 0)
|
||||
|
@ -605,6 +649,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&arvif->ar->conf_mutex);
|
||||
|
||||
if (!info->enable_beacon) {
|
||||
ath10k_vdev_stop(arvif);
|
||||
return;
|
||||
|
@ -631,6 +677,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&arvif->ar->conf_mutex);
|
||||
|
||||
if (!info->ibss_joined) {
|
||||
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
|
||||
if (ret)
|
||||
|
@ -680,6 +728,8 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
|
|||
enum wmi_sta_ps_mode psmode;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&arvif->ar->conf_mutex);
|
||||
|
||||
if (vif->type != NL80211_IFTYPE_STATION)
|
||||
return;
|
||||
|
||||
|
@ -722,6 +772,8 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
|
|||
struct ieee80211_bss_conf *bss_conf,
|
||||
struct wmi_peer_assoc_complete_arg *arg)
|
||||
{
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
memcpy(arg->addr, sta->addr, ETH_ALEN);
|
||||
arg->vdev_id = arvif->vdev_id;
|
||||
arg->peer_aid = sta->aid;
|
||||
|
@ -764,6 +816,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
|
|||
const u8 *rsnie = NULL;
|
||||
const u8 *wpaie = NULL;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
|
||||
info->bssid, NULL, 0, 0, 0);
|
||||
if (bss) {
|
||||
|
@ -804,6 +858,8 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
|
|||
u32 ratemask;
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
|
||||
ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
|
||||
rates = sband->bitrates;
|
||||
|
@ -827,6 +883,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
|
|||
int smps;
|
||||
int i, n;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
if (!ht_cap->ht_supported)
|
||||
return;
|
||||
|
||||
|
@ -905,6 +963,8 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
|
|||
u32 uapsd = 0;
|
||||
u32 max_sp = 0;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
if (sta->wme)
|
||||
arg->peer_flags |= WMI_PEER_QOS;
|
||||
|
||||
|
@ -1056,6 +1116,8 @@ static int ath10k_peer_assoc(struct ath10k *ar,
|
|||
{
|
||||
struct wmi_peer_assoc_complete_arg arg;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
|
||||
|
||||
ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
|
||||
|
@ -1079,6 +1141,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
|
|||
struct ieee80211_sta *ap_sta;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
|
||||
|
@ -1119,6 +1183,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
|
|||
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
/*
|
||||
* For some reason, calling VDEV-DOWN before VDEV-STOP
|
||||
* makes the FW to send frames via HTT after disassociation.
|
||||
|
@ -1152,6 +1218,8 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
|
||||
|
@ -1172,6 +1240,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
ret = ath10k_clear_peer_keys(arvif, sta->addr);
|
||||
if (ret) {
|
||||
ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
|
||||
|
@ -1198,6 +1268,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
|
|||
int ret;
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
bands = hw->wiphy->bands;
|
||||
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
|
||||
if (!bands[band])
|
||||
|
@ -1276,21 +1348,19 @@ static int ath10k_update_channel_list(struct ath10k *ar)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void ath10k_reg_notifier(struct wiphy *wiphy,
|
||||
struct regulatory_request *request)
|
||||
static void ath10k_regd_update(struct ath10k *ar)
|
||||
{
|
||||
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
|
||||
struct reg_dmn_pair_mapping *regpair;
|
||||
struct ath10k *ar = hw->priv;
|
||||
int ret;
|
||||
|
||||
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
ret = ath10k_update_channel_list(ar);
|
||||
if (ret)
|
||||
ath10k_warn("could not update channel list (%d)\n", ret);
|
||||
|
||||
regpair = ar->ath_common.regulatory.regpair;
|
||||
|
||||
/* Target allows setting up per-band regdomain but ath_common provides
|
||||
* a combined one only */
|
||||
ret = ath10k_wmi_pdev_set_regdomain(ar,
|
||||
|
@ -1303,6 +1373,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
|
|||
ath10k_warn("could not set pdev regdomain (%d)\n", ret);
|
||||
}
|
||||
|
||||
static void ath10k_reg_notifier(struct wiphy *wiphy,
|
||||
struct regulatory_request *request)
|
||||
{
|
||||
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
|
||||
struct ath10k *ar = hw->priv;
|
||||
|
||||
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
if (ar->state == ATH10K_STATE_ON)
|
||||
ath10k_regd_update(ar);
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
}
|
||||
|
||||
/***************/
|
||||
/* TX handlers */
|
||||
/***************/
|
||||
|
@ -1397,15 +1481,15 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
|
|||
int ret;
|
||||
|
||||
if (ieee80211_is_mgmt(hdr->frame_control))
|
||||
ret = ath10k_htt_mgmt_tx(ar->htt, skb);
|
||||
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
|
||||
else if (ieee80211_is_nullfunc(hdr->frame_control))
|
||||
/* FW does not report tx status properly for NullFunc frames
|
||||
* unless they are sent through mgmt tx path. mac80211 sends
|
||||
* those frames when it detects link/beacon loss and depends on
|
||||
* the tx status to be correct. */
|
||||
ret = ath10k_htt_mgmt_tx(ar->htt, skb);
|
||||
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
|
||||
else
|
||||
ret = ath10k_htt_tx(ar->htt, skb);
|
||||
ret = ath10k_htt_tx(&ar->htt, skb);
|
||||
|
||||
if (ret) {
|
||||
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
|
||||
|
@ -1552,6 +1636,10 @@ static int ath10k_abort_scan(struct ath10k *ar)
|
|||
ret = ath10k_wmi_stop_scan(ar, &arg);
|
||||
if (ret) {
|
||||
ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
ar->scan.in_progress = false;
|
||||
ath10k_offchan_tx_purge(ar);
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -1645,10 +1733,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
|
|||
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
|
||||
}
|
||||
|
||||
ath10k_tx_h_qos_workaround(hw, control, skb);
|
||||
ath10k_tx_h_update_wep_key(skb);
|
||||
ath10k_tx_h_add_p2p_noa_ie(ar, skb);
|
||||
ath10k_tx_h_seq_no(skb);
|
||||
/* it makes no sense to process injected frames like that */
|
||||
if (info->control.vif &&
|
||||
info->control.vif->type != NL80211_IFTYPE_MONITOR) {
|
||||
ath10k_tx_h_qos_workaround(hw, control, skb);
|
||||
ath10k_tx_h_update_wep_key(skb);
|
||||
ath10k_tx_h_add_p2p_noa_ie(ar, skb);
|
||||
ath10k_tx_h_seq_no(skb);
|
||||
}
|
||||
|
||||
memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
|
||||
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
|
||||
|
@ -1673,10 +1765,57 @@ static void ath10k_tx(struct ieee80211_hw *hw,
|
|||
/*
|
||||
* Initialize various parameters with default vaules.
|
||||
*/
|
||||
void ath10k_halt(struct ath10k *ar)
|
||||
{
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
del_timer_sync(&ar->scan.timeout);
|
||||
ath10k_offchan_tx_purge(ar);
|
||||
ath10k_peer_cleanup_all(ar);
|
||||
ath10k_core_stop(ar);
|
||||
ath10k_hif_power_down(ar);
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
if (ar->scan.in_progress) {
|
||||
del_timer(&ar->scan.timeout);
|
||||
ar->scan.in_progress = false;
|
||||
ieee80211_scan_completed(ar->hw, true);
|
||||
}
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
static int ath10k_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath10k *ar = hw->priv;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
if (ar->state != ATH10K_STATE_OFF &&
|
||||
ar->state != ATH10K_STATE_RESTARTING) {
|
||||
ret = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_hif_power_up(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not init hif (%d)\n", ret);
|
||||
ar->state = ATH10K_STATE_OFF;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath10k_core_start(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not init core (%d)\n", ret);
|
||||
ath10k_hif_power_down(ar);
|
||||
ar->state = ATH10K_STATE_OFF;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (ar->state == ATH10K_STATE_OFF)
|
||||
ar->state = ATH10K_STATE_ON;
|
||||
else if (ar->state == ATH10K_STATE_RESTARTING)
|
||||
ar->state = ATH10K_STATE_RESTARTED;
|
||||
|
||||
ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
|
||||
if (ret)
|
||||
|
@ -1688,6 +1827,10 @@ static int ath10k_start(struct ieee80211_hw *hw)
|
|||
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
|
||||
ret);
|
||||
|
||||
ath10k_regd_update(ar);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -1695,18 +1838,48 @@ static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;

/* avoid leaks in case FW never confirms scan for offchannel */
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_ON ||
ar->state == ATH10K_STATE_RESTARTED ||
ar->state == ATH10K_STATE_WEDGED)
ath10k_halt(ar);

ar->state = ATH10K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);

cancel_work_sync(&ar->offchan_tx_work);
ath10k_offchan_tx_purge(ar);
cancel_work_sync(&ar->restart_work);
}

static void ath10k_config_ps(struct ath10k *ar)
{
struct ath10k_generic_iter ar_iter;

lockdep_assert_held(&ar->conf_mutex);

/* During HW reconfiguration mac80211 reports all interfaces that were
 * running until reconfiguration was started. Since FW doesn't have any
 * vdevs at this point we must not iterate over this interface list.
 * This setting will be updated upon add_interface(). */
if (ar->state == ATH10K_STATE_RESTARTED)
return;

memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
ar_iter.ar = ar;

ieee80211_iterate_active_interfaces_atomic(
ar->hw, IEEE80211_IFACE_ITER_NORMAL,
ath10k_ps_iter, &ar_iter);

if (ar_iter.ret)
ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
u32 flags;

mutex_lock(&ar->conf_mutex);

@@ -1718,18 +1891,8 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&ar->data_lock);
}

if (changed & IEEE80211_CONF_CHANGE_PS) {
memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
ar_iter.ar = ar;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;

ieee80211_iterate_active_interfaces_atomic(hw,
flags,
ath10k_ps_iter,
&ar_iter);

ret = ar_iter.ret;
}
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);

if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR)

@@ -1738,6 +1901,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}

ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1859,6 +2023,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
}

ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret)
ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);

ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
if (ret)
ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);

if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;

@@ -2363,6 +2537,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
u32 value = 0;
int ret = 0;

lockdep_assert_held(&ar->conf_mutex);

if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
@@ -2558,11 +2734,16 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;

rts = min_t(u32, rts, ATH10K_RTS_MAX);
lockdep_assert_held(&arvif->ar->conf_mutex);

ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
WMI_VDEV_PARAM_RTS_THRESHOLD,
rts);
/* During HW reconfiguration mac80211 reports all interfaces that were
 * running until reconfiguration was started. Since FW doesn't have any
 * vdevs at this point we must not iterate over this interface list.
 * This setting will be updated upon add_interface(). */
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;

ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);

@@ -2581,8 +2762,9 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
ar_iter.ar = ar;

mutex_lock(&ar->conf_mutex);
ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath10k_set_rts_iter, &ar_iter);
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
ath10k_set_rts_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);

return ar_iter.ret;
@@ -2593,17 +2775,17 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath10k_generic_iter *ar_iter = data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
int ret;

frag = clamp_t(u32, frag,
ATH10K_FRAGMT_THRESHOLD_MIN,
ATH10K_FRAGMT_THRESHOLD_MAX);
lockdep_assert_held(&arvif->ar->conf_mutex);

ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
frag);
/* During HW reconfiguration mac80211 reports all interfaces that were
 * running until reconfiguration was started. Since FW doesn't have any
 * vdevs at this point we must not iterate over this interface list.
 * This setting will be updated upon add_interface(). */
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;

ar_iter->ret = ret;
ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);

@@ -2622,8 +2804,9 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ar_iter.ar = ar;

mutex_lock(&ar->conf_mutex);
ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath10k_set_frag_iter, &ar_iter);
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
ath10k_set_frag_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);

return ar_iter.ret;
@@ -2632,6 +2815,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
bool skip;
int ret;

/* mac80211 doesn't care if we really xmit queued frames or not

@@ -2639,16 +2823,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
if (drop)
return;

ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
mutex_lock(&ar->conf_mutex);

if (ar->state == ATH10K_STATE_WEDGED)
goto skip;

ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
spin_lock_bh(&ar->htt->tx_lock);
empty = bitmap_empty(ar->htt->used_msdu_ids,
ar->htt->max_num_pending_tx);
spin_unlock_bh(&ar->htt->tx_lock);
(empty);

spin_lock_bh(&ar->htt.tx_lock);
empty = bitmap_empty(ar->htt.used_msdu_ids,
ar->htt.max_num_pending_tx);
spin_unlock_bh(&ar->htt.tx_lock);

skip = (ar->state == ATH10K_STATE_WEDGED);

(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0)

if (ret <= 0 || skip)
ath10k_warn("tx not flushed\n");

skip:
mutex_unlock(&ar->conf_mutex);
}

/* TODO: Implement this function properly

@@ -2660,6 +2857,83 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
#ifdef CONFIG_PM
static int ath10k_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
struct ath10k *ar = hw->priv;
int ret;

ar->is_target_paused = false;

ret = ath10k_wmi_pdev_suspend_target(ar);
if (ret) {
ath10k_warn("could not suspend target (%d)\n", ret);
return 1;
}

ret = wait_event_interruptible_timeout(ar->event_queue,
ar->is_target_paused == true,
1 * HZ);
if (ret < 0) {
ath10k_warn("suspend interrupted (%d)\n", ret);
goto resume;
} else if (ret == 0) {
ath10k_warn("suspend timed out - target pause event never came\n");
goto resume;
}

ret = ath10k_hif_suspend(ar);
if (ret) {
ath10k_warn("could not suspend hif (%d)\n", ret);
goto resume;
}

return 0;
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
ath10k_warn("could not resume target (%d)\n", ret);
return 1;
}

static int ath10k_resume(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
int ret;

ret = ath10k_hif_resume(ar);
if (ret) {
ath10k_warn("could not resume hif (%d)\n", ret);
return 1;
}

ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
ath10k_warn("could not resume target (%d)\n", ret);
return 1;
}

return 0;
}
#endif
static void ath10k_restart_complete(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;

mutex_lock(&ar->conf_mutex);

/* If device failed to restart it will be in a different state, e.g.
 * ATH10K_STATE_WEDGED */
if (ar->state == ATH10K_STATE_RESTARTED) {
ath10k_info("device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
}

mutex_unlock(&ar->conf_mutex);
}

static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,

@@ -2680,6 +2954,11 @@ static const struct ieee80211_ops ath10k_ops = {
.set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
.restart_complete = ath10k_restart_complete,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
#endif
};

#define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -2948,8 +3227,10 @@ int ath10k_mac_register(struct ath10k *ar)
channels = kmemdup(ath10k_2ghz_channels,
sizeof(ath10k_2ghz_channels),
GFP_KERNEL);
if (!channels)
return -ENOMEM;
if (!channels) {
ret = -ENOMEM;
goto err_free;
}

band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);

@@ -2968,11 +3249,8 @@ int ath10k_mac_register(struct ath10k *ar)
sizeof(ath10k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
kfree(band->channels);
}
return -ENOMEM;
ret = -ENOMEM;
goto err_free;
}

band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];

@@ -3036,25 +3314,30 @@ int ath10k_mac_register(struct ath10k *ar)
ath10k_reg_notifier);
if (ret) {
ath10k_err("Regulatory initialization failed\n");
return ret;
goto err_free;
}

ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err("ieee80211 registration failed: %d\n", ret);
return ret;
goto err_free;
}

if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
ret = regulatory_hint(ar->hw->wiphy,
ar->ath_common.regulatory.alpha2);
if (ret)
goto exit;
goto err_unregister;
}

return 0;
exit:

err_unregister:
ieee80211_unregister_hw(ar->hw);
err_free:
kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);

return ret;
}
@@ -34,6 +34,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);

static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
@ -54,6 +54,8 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
|
|||
int num);
|
||||
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
|
||||
static void ath10k_pci_stop_ce(struct ath10k *ar);
|
||||
static void ath10k_pci_device_reset(struct ath10k *ar);
|
||||
static int ath10k_pci_reset_target(struct ath10k *ar);
|
||||
|
||||
static const struct ce_attr host_ce_config_wlan[] = {
|
||||
/* host->target HTC control and raw streams */
|
||||
|
@ -718,6 +720,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
|
|||
reg_dump_values[i + 1],
|
||||
reg_dump_values[i + 2],
|
||||
reg_dump_values[i + 3]);
|
||||
|
||||
ieee80211_queue_work(ar->hw, &ar->restart_work);
|
||||
}
|
||||
|
||||
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
|
||||
|
@ -744,8 +748,8 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
|
|||
ath10k_ce_per_engine_service(ar, pipe);
|
||||
}
|
||||
|
||||
static void ath10k_pci_hif_post_init(struct ath10k *ar,
|
||||
struct ath10k_hif_cb *callbacks)
|
||||
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
|
||||
struct ath10k_hif_cb *callbacks)
|
||||
{
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
|
||||
|
@ -1263,7 +1267,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
|
|||
ath10k_pci_process_ce(ar);
|
||||
ath10k_pci_cleanup_ce(ar);
|
||||
ath10k_pci_buffer_cleanup(ar);
|
||||
ath10k_pci_ce_deinit(ar);
|
||||
}
|
||||
|
||||
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
|
||||
|
@ -1735,6 +1738,115 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
|
|||
ath10k_pci_sleep(ar);
|
||||
}
|
||||
|
||||
static int ath10k_pci_hif_power_up(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Bring the target up cleanly.
|
||||
*
|
||||
* The target may be in an undefined state with an AUX-powered Target
|
||||
* and a Host in WoW mode. If the Host crashes, loses power, or is
|
||||
* restarted (without unloading the driver) then the Target is left
|
||||
* (aux) powered and running. On a subsequent driver load, the Target
|
||||
* is in an unexpected state. We try to catch that here in order to
|
||||
* reset the Target and retry the probe.
|
||||
*/
|
||||
ath10k_pci_device_reset(ar);
|
||||
|
||||
ret = ath10k_pci_reset_target(ar);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (ath10k_target_ps) {
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
|
||||
} else {
|
||||
/* Force AWAKE forever */
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
|
||||
ath10k_do_pci_wake(ar);
|
||||
}
|
||||
|
||||
ret = ath10k_pci_ce_init(ar);
|
||||
if (ret)
|
||||
goto err_ps;
|
||||
|
||||
ret = ath10k_pci_init_config(ar);
|
||||
if (ret)
|
||||
goto err_ce;
|
||||
|
||||
ret = ath10k_pci_wake_target_cpu(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not wake up target CPU (%d)\n", ret);
|
||||
goto err_ce;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_ce:
|
||||
ath10k_pci_ce_deinit(ar);
|
||||
err_ps:
|
||||
if (!ath10k_target_ps)
|
||||
ath10k_do_pci_sleep(ar);
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ath10k_pci_hif_power_down(struct ath10k *ar)
|
||||
{
|
||||
ath10k_pci_ce_deinit(ar);
|
||||
if (!ath10k_target_ps)
|
||||
ath10k_do_pci_sleep(ar);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
#define ATH10K_PCI_PM_CONTROL 0x44
|
||||
|
||||
static int ath10k_pci_hif_suspend(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
struct pci_dev *pdev = ar_pci->pdev;
|
||||
u32 val;
|
||||
|
||||
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
||||
|
||||
if ((val & 0x000000ff) != 0x3) {
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
||||
(val & 0xffffff00) | 0x03);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_pci_hif_resume(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
struct pci_dev *pdev = ar_pci->pdev;
|
||||
u32 val;
|
||||
|
||||
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
||||
|
||||
if ((val & 0x000000ff) != 0) {
|
||||
pci_restore_state(pdev);
|
||||
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
||||
val & 0xffffff00);
|
||||
/*
|
||||
* Suspend/Resume resets the PCI configuration space,
|
||||
* so we have to re-disable the RETRY_TIMEOUT register (0x41)
|
||||
* to keep PCI Tx retries from interfering with C3 CPU state
|
||||
*/
|
||||
pci_read_config_dword(pdev, 0x40, &val);
|
||||
|
||||
if ((val & 0x0000ff00) != 0)
|
||||
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
|
||||
.send_head = ath10k_pci_hif_send_head,
|
||||
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
|
||||
|
@ -1743,8 +1855,14 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
|
|||
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
|
||||
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
|
||||
.send_complete_check = ath10k_pci_hif_send_complete_check,
|
||||
.init = ath10k_pci_hif_post_init,
|
||||
.set_callbacks = ath10k_pci_hif_set_callbacks,
|
||||
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
|
||||
.power_up = ath10k_pci_hif_power_up,
|
||||
.power_down = ath10k_pci_hif_power_down,
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = ath10k_pci_hif_suspend,
|
||||
.resume = ath10k_pci_hif_resume,
|
||||
#endif
|
||||
};
|
||||
|
||||
static void ath10k_pci_ce_tasklet(unsigned long ptr)
|
||||
|
@ -2059,9 +2177,9 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
|
||||
static void ath10k_pci_device_reset(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k *ar = ar_pci->ar;
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
void __iomem *mem = ar_pci->mem;
|
||||
int i;
|
||||
u32 val;
|
||||
|
@ -2118,7 +2236,7 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
|
|||
case ATH10K_PCI_FEATURE_MSI_X:
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
|
||||
break;
|
||||
case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
|
||||
case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
|
||||
break;
|
||||
}
|
||||
|
@ -2145,7 +2263,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
|||
|
||||
switch (pci_dev->device) {
|
||||
case QCA988X_1_0_DEVICE_ID:
|
||||
set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
|
||||
set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
|
||||
break;
|
||||
case QCA988X_2_0_DEVICE_ID:
|
||||
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
|
||||
|
@ -2158,8 +2276,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
|||
|
||||
ath10k_pci_dump_features(ar_pci);
|
||||
|
||||
ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
|
||||
&ath10k_pci_hif_ops);
|
||||
ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
|
||||
if (!ar) {
|
||||
ath10k_err("ath10k_core_create failed!\n");
|
||||
ret = -EINVAL;
|
||||
|
@ -2167,7 +2284,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
|||
}
|
||||
|
||||
/* Enable QCA988X_1.0 HW workarounds */
|
||||
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
|
||||
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
|
||||
spin_lock_init(&ar_pci->hw_v1_workaround_lock);
|
||||
|
||||
ar_pci->ar = ar;
|
||||
|
@ -2247,54 +2364,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
|||
goto err_iomap;
|
||||
}
|
||||
|
||||
/*
|
||||
* Bring the target up cleanly.
|
||||
*
|
||||
* The target may be in an undefined state with an AUX-powered Target
|
||||
* and a Host in WoW mode. If the Host crashes, loses power, or is
|
||||
* restarted (without unloading the driver) then the Target is left
|
||||
* (aux) powered and running. On a subsequent driver load, the Target
|
||||
* is in an unexpected state. We try to catch that here in order to
|
||||
* reset the Target and retry the probe.
|
||||
*/
|
||||
ath10k_pci_device_reset(ar_pci);
|
||||
|
||||
ret = ath10k_pci_reset_target(ar);
|
||||
if (ret)
|
||||
goto err_intr;
|
||||
|
||||
if (ath10k_target_ps) {
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
|
||||
} else {
|
||||
/* Force AWAKE forever */
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
|
||||
ath10k_do_pci_wake(ar);
|
||||
}
|
||||
|
||||
ret = ath10k_pci_ce_init(ar);
|
||||
if (ret)
|
||||
goto err_intr;
|
||||
|
||||
ret = ath10k_pci_init_config(ar);
|
||||
if (ret)
|
||||
goto err_ce;
|
||||
|
||||
ret = ath10k_pci_wake_target_cpu(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not wake up target CPU (%d)\n", ret);
|
||||
goto err_ce;
|
||||
}
|
||||
|
||||
ret = ath10k_core_register(ar);
|
||||
if (ret) {
|
||||
ath10k_err("could not register driver core (%d)\n", ret);
|
||||
goto err_ce;
|
||||
goto err_intr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_ce:
|
||||
ath10k_pci_ce_deinit(ar);
|
||||
err_intr:
|
||||
ath10k_pci_stop_intr(ar);
|
||||
err_iomap:
|
||||
|
@ -2345,128 +2422,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
|
|||
kfree(ar_pci);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_PM_SLEEP)
|
||||
|
||||
#define ATH10K_PCI_PM_CONTROL 0x44
|
||||
|
||||
static int ath10k_pci_suspend(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct ath10k *ar = pci_get_drvdata(pdev);
|
||||
struct ath10k_pci *ar_pci;
|
||||
u32 val;
|
||||
int ret, retval;
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
|
||||
|
||||
if (!ar)
|
||||
return -ENODEV;
|
||||
|
||||
ar_pci = ath10k_pci_priv(ar);
|
||||
if (!ar_pci)
|
||||
return -ENODEV;
|
||||
|
||||
if (ath10k_core_target_suspend(ar))
|
||||
return -EBUSY;
|
||||
|
||||
ret = wait_event_interruptible_timeout(ar->event_queue,
|
||||
ar->is_target_paused == true,
|
||||
1 * HZ);
|
||||
if (ret < 0) {
|
||||
ath10k_warn("suspend interrupted (%d)\n", ret);
|
||||
retval = ret;
|
||||
goto resume;
|
||||
} else if (ret == 0) {
|
||||
ath10k_warn("suspend timed out - target pause event never came\n");
|
||||
retval = EIO;
|
||||
goto resume;
|
||||
}
|
||||
|
||||
/*
|
||||
* reset is_target_paused and host can check that in next time,
|
||||
* or it will always be TRUE and host just skip the waiting
|
||||
* condition, it causes target assert due to host already
|
||||
* suspend
|
||||
*/
|
||||
ar->is_target_paused = false;
|
||||
|
||||
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
||||
|
||||
if ((val & 0x000000ff) != 0x3) {
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
||||
(val & 0xffffff00) | 0x03);
|
||||
}
|
||||
|
||||
return 0;
|
||||
resume:
|
||||
ret = ath10k_core_target_resume(ar);
|
||||
if (ret)
|
||||
ath10k_warn("could not resume (%d)\n", ret);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int ath10k_pci_resume(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct ath10k *ar = pci_get_drvdata(pdev);
|
||||
struct ath10k_pci *ar_pci;
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
|
||||
|
||||
if (!ar)
|
||||
return -ENODEV;
|
||||
ar_pci = ath10k_pci_priv(ar);
|
||||
|
||||
if (!ar_pci)
|
||||
return -ENODEV;
|
||||
|
||||
ret = pci_enable_device(pdev);
|
||||
if (ret) {
|
||||
ath10k_warn("cannot enable PCI device: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
|
||||
|
||||
if ((val & 0x000000ff) != 0) {
|
||||
pci_restore_state(pdev);
|
||||
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
|
||||
val & 0xffffff00);
|
||||
/*
|
||||
* Suspend/Resume resets the PCI configuration space,
|
||||
* so we have to re-disable the RETRY_TIMEOUT register (0x41)
|
||||
* to keep PCI Tx retries from interfering with C3 CPU state
|
||||
*/
|
||||
pci_read_config_dword(pdev, 0x40, &val);
|
||||
|
||||
if ((val & 0x0000ff00) != 0)
|
||||
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
|
||||
}
|
||||
|
||||
ret = ath10k_core_target_resume(ar);
|
||||
if (ret)
|
||||
ath10k_warn("target resume failed: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
|
||||
ath10k_pci_suspend,
|
||||
ath10k_pci_resume);
|
||||
|
||||
#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
|
||||
|
||||
#else
|
||||
|
||||
#define ATH10K_PCI_PM_OPS NULL
|
||||
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
|
||||
|
||||
static struct pci_driver ath10k_pci_driver = {
|
||||
|
@ -2474,7 +2429,6 @@ static struct pci_driver ath10k_pci_driver = {
|
|||
.id_table = ath10k_pci_id_table,
|
||||
.probe = ath10k_pci_probe,
|
||||
.remove = ath10k_pci_remove,
|
||||
.driver.pm = ATH10K_PCI_PM_OPS,
|
||||
};
|
||||
|
||||
static int __init ath10k_pci_init(void)
|
||||
|
|
|
@ -152,7 +152,7 @@ struct service_to_pipe {
|
|||
|
||||
enum ath10k_pci_features {
|
||||
ATH10K_PCI_FEATURE_MSI_X = 0,
|
||||
ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,
|
||||
ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
|
||||
|
||||
/* keep last */
|
||||
ATH10K_PCI_FEATURE_COUNT
|
||||
|
@ -311,7 +311,7 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
|
|||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
void __iomem *addr = ar_pci->mem;
|
||||
|
||||
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
|
||||
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
|
||||
|
|
|
@ -27,6 +27,13 @@ void ath10k_wmi_flush_tx(struct ath10k *ar)
|
|||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
if (ar->state == ATH10K_STATE_WEDGED) {
|
||||
ath10k_warn("wmi flush skipped - device is wedged anyway\n");
|
||||
return;
|
||||
}
|
||||
|
||||
ret = wait_event_timeout(ar->wmi.wq,
|
||||
atomic_read(&ar->wmi.pending_tx_count) == 0,
|
||||
5*HZ);
|
||||
|
@ -111,7 +118,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
|
|||
|
||||
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
|
||||
|
||||
status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
|
||||
status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
|
||||
if (status) {
|
||||
dev_kfree_skb_any(skb);
|
||||
atomic_dec(&ar->wmi.pending_tx_count);
|
||||
|
@ -501,8 +508,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
|
|||
ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
|
||||
(u8 *)skb_tail_pointer(bcn) - ies);
|
||||
if (!ie) {
|
||||
/* highly unlikely for mac80211 */
|
||||
ath10k_warn("no tim ie found;\n");
|
||||
if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
|
||||
ath10k_warn("no tim ie found;\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1114,7 +1121,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
|
|||
/* connect to control service */
|
||||
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
|
||||
|
||||
status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
|
||||
status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
|
||||
if (status) {
|
||||
ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
|
||||
status);
|
||||
|
@ -1748,6 +1755,9 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
|
|||
if (arg->key_data)
|
||||
memcpy(cmd->key_data, arg->key_data, arg->key_len);
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_WMI,
|
||||
"wmi vdev install key idx %d cipher %d len %d\n",
|
||||
arg->key_idx, arg->key_cipher, arg->key_len);
|
||||
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
|
||||
}
|
||||
|
||||
|
@ -2011,6 +2021,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
|
|||
cmd->peer_vht_rates.tx_mcs_set =
|
||||
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_WMI,
|
||||
"wmi peer assoc vdev %d addr %pM\n",
|
||||
arg->vdev_id, arg->addr);
|
||||
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
|
||||
}
|
||||
|
||||
|
@@ -2079,3 +2092,22 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
}

int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms)
{
struct wmi_force_fw_hang_cmd *cmd;
struct sk_buff *skb;

skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;

cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
cmd->type = __cpu_to_le32(type);
cmd->delay_ms = __cpu_to_le32(delay_ms);

ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
}
@@ -416,6 +416,7 @@ enum wmi_cmd_id {
WMI_PDEV_FTM_INTG_CMDID,
WMI_VDEV_SET_KEEPALIVE_CMDID,
WMI_VDEV_GET_KEEPALIVE_CMDID,
WMI_FORCE_FW_HANG_CMDID,

/* GPIO Configuration */
WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),

@@ -2972,6 +2973,22 @@ struct wmi_sta_keepalive_cmd {
struct wmi_sta_keepalive_arp_resp arp_resp;
} __packed;

enum wmi_force_fw_hang_type {
WMI_FORCE_FW_HANG_ASSERT = 1,
WMI_FORCE_FW_HANG_NO_DETECT,
WMI_FORCE_FW_HANG_CTRL_EP_FULL,
WMI_FORCE_FW_HANG_EMPTY_POINT,
WMI_FORCE_FW_HANG_STACK_OVERFLOW,
WMI_FORCE_FW_HANG_INFINITE_LOOP,
};

#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF

struct wmi_force_fw_hang_cmd {
__le32 type;
__le32 delay_ms;
} __packed;

#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
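The new command simply carries one of the wmi_force_fw_hang_type values plus a delay in milliseconds. As a minimal sketch (not part of this changeset; the helper name is hypothetical), a debugging hook could exercise the firmware recovery path like this, holding conf_mutex the way the other WMI callers in this series do:

/* Hypothetical debug helper, illustrative only: ask the firmware to
 * assert immediately (delay_ms = 0) so the restart path can be tested. */
static int ath10k_debug_simulate_fw_crash(struct ath10k *ar)
{
	int ret;

	mutex_lock(&ar->conf_mutex);
	ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
	if (ret)
		ath10k_warn("could not force fw hang (%d)\n", ret);
	mutex_unlock(&ar->conf_mutex);

	return ret;
}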
@@ -3048,5 +3065,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);

#endif /* _WMI_H_ */
@@ -96,6 +96,16 @@ config ATH9K_LEGACY_RATE_CONTROL
has to be passed to mac80211 using the module parameter,
ieee80211_default_rc_algo.

config ATH9K_RFKILL
	bool "Atheros ath9k rfkill support" if EXPERT
	depends on ATH9K
	depends on RFKILL=y || RFKILL=ATH9K
	default y
	help
	  Say Y to have ath9k poll the RF-Kill GPIO every couple of
	  seconds. Turn off to save power, but enable it if you have
	  a platform that can toggle the RF-Kill GPIO.
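With this symbol in place the GPIO polling becomes optional. As an illustrative .config fragment only (the ATH9K value is just an example, not part of the patch), a build that keeps the default behaviour would carry:

CONFIG_ATH9K=m
CONFIG_ATH9K_RFKILL=y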

config ATH9K_HTC
	tristate "Atheros HTC based wireless cards support"
	depends on USB && MAC80211
@@ -16,37 +119,
#include "ath9k.h"

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
/*
 * AR9285
 * ======
 *
 * EEPROM has 2 4-bit fields containing the card configuration.
 *
 * antdiv_ctl1:
 * ------------
 * bb_enable_ant_div_lnadiv : 1
 * bb_ant_div_alt_gaintb    : 1
 * bb_ant_div_main_gaintb   : 1
 * bb_enable_ant_fast_div   : 1
 *
 * antdiv_ctl2:
 * -----------
 * bb_ant_div_alt_lnaconf  : 2
 * bb_ant_div_main_lnaconf : 2
 *
 * The EEPROM bits are used as follows:
 * ------------------------------------
 *
 * bb_enable_ant_div_lnadiv - Enable LNA path rx antenna diversity/combining.
 *                            Set in AR_PHY_MULTICHAIN_GAIN_CTL.
 *
 * bb_ant_div_[alt/main]_gaintb - 0 -> Antenna config Alt/Main uses gaintable 0
 *                                1 -> Antenna config Alt/Main uses gaintable 1
 *                                Set in AR_PHY_MULTICHAIN_GAIN_CTL.
 *
 * bb_enable_ant_fast_div - Enable fast antenna diversity.
 *                          Set in AR_PHY_CCK_DETECT.
 *
 * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config.
 *                                 Set in AR_PHY_MULTICHAIN_GAIN_CTL.
 *                                 10=LNA1
 *                                 01=LNA2
 *                                 11=LNA1+LNA2
 *                                 00=LNA1-LNA2
 *
 * AR9485 / AR9565 / AR9331
 * ========================
 *
 * The same bits are present in the EEPROM, but the location in the
 * EEPROM is different (ant_div_control in ar9300_BaseExtension_1).
 *
 * ant_div_alt_lnaconf   ==> bit 0~1
 * ant_div_main_lnaconf  ==> bit 2~3
 * ant_div_alt_gaintb    ==> bit 4
 * ant_div_main_gaintb   ==> bit 5
 * enable_ant_div_lnadiv ==> bit 6
 * enable_ant_fast_div   ==> bit 7
 */
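As a quick cross-check of the AR9485/AR9565/AR9331 layout documented above, a stand-alone decoder (illustrative only, not part of the driver; the struct and helper are hypothetical) would unpack the ant_div_control byte like this:

/* Illustrative decoder for the ant_div_control byte described in the
 * comment above. */
struct ant_div_control_bits {
	u8 alt_lnaconf;		/* bits 0-1 */
	u8 main_lnaconf;	/* bits 2-3 */
	u8 alt_gaintb;		/* bit 4 */
	u8 main_gaintb;		/* bit 5 */
	u8 lnadiv_enable;	/* bit 6 */
	u8 fast_div_enable;	/* bit 7 */
};

static void decode_ant_div_control(u8 val, struct ant_div_control_bits *out)
{
	out->alt_lnaconf     = (val >> 0) & 0x3;
	out->main_lnaconf    = (val >> 2) & 0x3;
	out->alt_gaintb      = (val >> 4) & 0x1;
	out->main_gaintb     = (val >> 5) & 0x1;
	out->lnadiv_enable   = (val >> 6) & 0x1;
	out->fast_div_enable = (val >> 7) & 0x1;
}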
static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb,
int alt_ratio, int maxdelta,
int mindelta, int main_rssi_avg,
int alt_rssi_avg, int pkt_count)
{
return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
(alt_rssi_avg > main_rssi_avg + maxdelta)) ||
(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
if (pkt_count <= 50)
return false;

if (alt_rssi_avg > main_rssi_avg + mindelta)
return true;

if (alt_ratio >= antcomb->ant_ratio2 &&
alt_rssi_avg >= antcomb->low_rssi_thresh &&
(alt_rssi_avg > main_rssi_avg + maxdelta))
return true;

return false;
}
static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
int curr_main_set, int curr_alt_set,
int alt_rssi_avg, int main_rssi_avg)
static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf,
struct ath_ant_comb *antcomb,
int alt_ratio, int alt_rssi_avg,
int main_rssi_avg)
{
bool result = false;
switch (div_group) {
bool result, set1, set2;

result = set1 = set2 = false;

if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 &&
conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1)
set1 = true;

if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 &&
conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2)
set2 = true;

switch (conf->div_group) {
case 0:
if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
result = true;
break;
case 1:
case 2:
if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
(curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
(alt_rssi_avg >= (main_rssi_avg - 5))) ||
((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
(curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
(alt_rssi_avg >= (main_rssi_avg - 2)))) &&
(alt_rssi_avg >= 4))
if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
break;

if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) ||
(set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) ||
(alt_ratio > antcomb->ant_ratio))
result = true;
else
result = false;

break;
case 3:
if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
break;

if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) ||
(set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) ||
(alt_ratio > antcomb->ant_ratio))
result = true;

break;
}
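Reading the new thresholds with concrete numbers: in div_group 1 or 2 with main on LNA2 and alt on LNA1 (the set1 case), alt_rssi_avg = 18 against main_rssi_avg = 20 yields result = true, since 18 >= 20 - 5 and 18 clears the absolute floor of 4 (assuming it is also at or above low_rssi_thresh); with the roles swapped (set2) the same pair fails 18 >= 20 - 2, so the outcome then depends on alt_ratio exceeding antcomb->ant_ratio.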
@ -108,6 +190,74 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
|
|||
}
|
||||
}
|
||||
|
||||
static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
|
||||
struct ath_hw_antcomb_conf *conf)
|
||||
{
|
||||
/* set alt to the conf with maximum ratio */
|
||||
if (antcomb->first_ratio && antcomb->second_ratio) {
|
||||
if (antcomb->rssi_second > antcomb->rssi_third) {
|
||||
/* first alt*/
|
||||
if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2*/
|
||||
if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
conf->alt_lna_conf =
|
||||
antcomb->first_quick_scan_conf;
|
||||
} else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) {
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
} else {
|
||||
/* Set alt to A+B or A-B */
|
||||
conf->alt_lna_conf = antcomb->second_quick_scan_conf;
|
||||
}
|
||||
} else if (antcomb->first_ratio) {
|
||||
/* first alt */
|
||||
if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
conf->alt_lna_conf = antcomb->first_quick_scan_conf;
|
||||
} else if (antcomb->second_ratio) {
|
||||
/* second alt */
|
||||
if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
conf->alt_lna_conf = antcomb->second_quick_scan_conf;
|
||||
} else {
|
||||
/* main is largest */
|
||||
if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
conf->alt_lna_conf = antcomb->main_conf;
|
||||
}
|
||||
}
|
||||
|
||||
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
||||
struct ath_hw_antcomb_conf *div_ant_conf,
|
||||
int main_rssi_avg, int alt_rssi_avg,
|
||||
|
@ -129,7 +279,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
|
||||
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
|
||||
/* main is LNA1 */
|
||||
if (ath_is_alt_ant_ratio_better(alt_ratio,
|
||||
if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
|
@ -138,7 +288,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
else
|
||||
antcomb->first_ratio = false;
|
||||
} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
|
||||
if (ath_is_alt_ant_ratio_better(alt_ratio,
|
||||
if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
|
@ -147,11 +297,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
else
|
||||
antcomb->first_ratio = false;
|
||||
} else {
|
||||
if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
|
||||
(alt_rssi_avg > main_rssi_avg +
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
|
||||
(alt_rssi_avg > main_rssi_avg)) &&
|
||||
(antcomb->total_pkt_count > 50))
|
||||
if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
|
||||
0,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
antcomb->total_pkt_count))
|
||||
antcomb->first_ratio = true;
|
||||
else
|
||||
antcomb->first_ratio = false;
|
||||
|
@ -164,17 +314,21 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
antcomb->rssi_first = main_rssi_avg;
|
||||
antcomb->rssi_third = alt_rssi_avg;
|
||||
|
||||
if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
|
||||
switch(antcomb->second_quick_scan_conf) {
|
||||
case ATH_ANT_DIV_COMB_LNA1:
|
||||
antcomb->rssi_lna1 = alt_rssi_avg;
|
||||
else if (antcomb->second_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA2:
|
||||
antcomb->rssi_lna2 = alt_rssi_avg;
|
||||
else if (antcomb->second_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
|
||||
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
|
||||
antcomb->rssi_lna2 = main_rssi_avg;
|
||||
else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
|
||||
antcomb->rssi_lna1 = main_rssi_avg;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
|
||||
|
@ -184,7 +338,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
|
||||
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
|
||||
if (ath_is_alt_ant_ratio_better(alt_ratio,
|
||||
if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
|
@ -193,7 +347,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
else
|
||||
antcomb->second_ratio = false;
|
||||
} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
|
||||
if (ath_is_alt_ant_ratio_better(alt_ratio,
|
||||
if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
|
@ -202,105 +356,18 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
|
|||
else
|
||||
antcomb->second_ratio = false;
|
||||
} else {
|
||||
if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
|
||||
(alt_rssi_avg > main_rssi_avg +
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
|
||||
(alt_rssi_avg > main_rssi_avg)) &&
|
||||
(antcomb->total_pkt_count > 50))
|
||||
if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
|
||||
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
|
||||
0,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
antcomb->total_pkt_count))
|
||||
antcomb->second_ratio = true;
|
||||
else
|
||||
antcomb->second_ratio = false;
|
||||
}
|
||||
|
||||
/* set alt to the conf with maximun ratio */
|
||||
if (antcomb->first_ratio && antcomb->second_ratio) {
|
||||
if (antcomb->rssi_second > antcomb->rssi_third) {
|
||||
/* first alt*/
|
||||
if ((antcomb->first_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->first_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2*/
|
||||
if (div_ant_conf->main_lna_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
div_ant_conf->alt_lna_conf =
|
||||
antcomb->first_quick_scan_conf;
|
||||
} else if ((antcomb->second_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->second_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)) {
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (div_ant_conf->main_lna_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
} else {
|
||||
/* Set alt to A+B or A-B */
|
||||
div_ant_conf->alt_lna_conf =
|
||||
antcomb->second_quick_scan_conf;
|
||||
}
|
||||
} else if (antcomb->first_ratio) {
|
||||
/* first alt */
|
||||
if ((antcomb->first_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->first_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (div_ant_conf->main_lna_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
div_ant_conf->alt_lna_conf =
|
||||
antcomb->first_quick_scan_conf;
|
||||
} else if (antcomb->second_ratio) {
|
||||
/* second alt */
|
||||
if ((antcomb->second_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->second_quick_scan_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (div_ant_conf->main_lna_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
div_ant_conf->alt_lna_conf =
|
||||
antcomb->second_quick_scan_conf;
|
||||
} else {
|
||||
/* main is largest */
|
||||
if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
|
||||
(antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
|
||||
/* Set alt LNA1 or LNA2 */
|
||||
if (div_ant_conf->main_lna_conf ==
|
||||
ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
else
|
||||
div_ant_conf->alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
else
|
||||
/* Set alt to A+B or A-B */
|
||||
div_ant_conf->alt_lna_conf = antcomb->main_conf;
|
||||
}
|
||||
ath_ant_set_alt_ratio(antcomb, div_ant_conf);
|
||||
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -430,8 +497,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
|
|||
ant_conf->fast_div_bias = 0x1;
|
||||
break;
|
||||
case 0x10: /* LNA2 A-B */
|
||||
if (!(antcomb->scan) &&
|
||||
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
|
||||
if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
|
||||
ant_conf->fast_div_bias = 0x1;
|
||||
else
|
||||
ant_conf->fast_div_bias = 0x2;
|
||||
|
@ -440,15 +506,13 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
|
|||
ant_conf->fast_div_bias = 0x1;
|
||||
break;
|
||||
case 0x13: /* LNA2 A+B */
|
||||
if (!(antcomb->scan) &&
|
||||
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
|
||||
if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
|
||||
ant_conf->fast_div_bias = 0x1;
|
||||
else
|
||||
ant_conf->fast_div_bias = 0x2;
|
||||
break;
|
||||
case 0x20: /* LNA1 A-B */
|
||||
if (!(antcomb->scan) &&
|
||||
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
|
||||
if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
|
||||
ant_conf->fast_div_bias = 0x1;
|
||||
else
|
||||
ant_conf->fast_div_bias = 0x2;
|
||||
|
@ -457,8 +521,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
|
|||
ant_conf->fast_div_bias = 0x1;
|
||||
break;
|
||||
case 0x23: /* LNA1 A+B */
|
||||
if (!(antcomb->scan) &&
|
||||
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
|
||||
if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
|
||||
ant_conf->fast_div_bias = 0x1;
|
||||
else
|
||||
ant_conf->fast_div_bias = 0x2;
|
||||
|
@ -475,6 +538,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
|
|||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (antcomb->fast_div_bias)
|
||||
ant_conf->fast_div_bias = antcomb->fast_div_bias;
|
||||
} else if (ant_conf->div_group == 3) {
|
||||
switch ((ant_conf->main_lna_conf << 4) |
|
||||
ant_conf->alt_lna_conf) {
|
||||
|
@ -540,6 +606,138 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
|
|||
}
|
||||
}
|
||||
|
||||
static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
|
||||
struct ath_hw_antcomb_conf *conf,
|
||||
int curr_alt_set, int alt_rssi_avg,
|
||||
int main_rssi_avg)
|
||||
{
|
||||
switch (curr_alt_set) {
|
||||
case ATH_ANT_DIV_COMB_LNA2:
|
||||
antcomb->rssi_lna2 = alt_rssi_avg;
|
||||
antcomb->rssi_lna1 = main_rssi_avg;
|
||||
antcomb->scan = true;
|
||||
/* set to A+B */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1:
|
||||
antcomb->rssi_lna1 = alt_rssi_avg;
|
||||
antcomb->rssi_lna2 = main_rssi_avg;
|
||||
antcomb->scan = true;
|
||||
/* set to A+B */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
|
||||
antcomb->rssi_add = alt_rssi_avg;
|
||||
antcomb->scan = true;
|
||||
/* set to A-B */
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
|
||||
antcomb->rssi_sub = alt_rssi_avg;
|
||||
antcomb->scan = false;
|
||||
if (antcomb->rssi_lna2 >
|
||||
(antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
|
||||
/* use LNA2 as main LNA */
|
||||
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
|
||||
(antcomb->rssi_add > antcomb->rssi_sub)) {
|
||||
/* set to A+B */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
} else if (antcomb->rssi_sub >
|
||||
antcomb->rssi_lna1) {
|
||||
/* set to A-B */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
|
||||
} else {
|
||||
/* set to LNA1 */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
}
|
||||
} else {
|
||||
/* use LNA1 as main LNA */
|
||||
if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
|
||||
(antcomb->rssi_add > antcomb->rssi_sub)) {
|
||||
/* set to A+B */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
} else if (antcomb->rssi_sub >
|
||||
antcomb->rssi_lna1) {
|
||||
/* set to A-B */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
|
||||
} else {
|
||||
/* set to LNA2 */
|
||||
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf,
|
||||
struct ath_ant_comb *antcomb,
|
||||
int alt_ratio, int alt_rssi_avg,
|
||||
int main_rssi_avg, int curr_main_set,
|
||||
int curr_alt_set)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio,
|
||||
alt_rssi_avg, main_rssi_avg)) {
|
||||
if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
|
||||
/*
|
||||
* Switch main and alt LNA.
|
||||
*/
|
||||
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
|
||||
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
}
|
||||
|
||||
ret = true;
|
||||
} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
|
||||
(curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
|
||||
/*
|
||||
Set alt to another LNA.
|
||||
*/
|
||||
if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
|
||||
else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
|
||||
div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
|
||||
ret = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb)
|
||||
{
|
||||
int alt_ratio;
|
||||
|
||||
if (!antcomb->scan || !antcomb->alt_good)
|
||||
return false;
|
||||
|
||||
if (time_after(jiffies, antcomb->scan_start_time +
|
||||
msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
|
||||
return true;
|
||||
|
||||
if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
|
||||
alt_ratio = ((antcomb->alt_recv_cnt * 100) /
|
||||
antcomb->total_pkt_count);
|
||||
if (alt_ratio < antcomb->ant_ratio)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
|
||||
{
|
||||
struct ath_hw_antcomb_conf div_ant_conf;
|
||||
|
@ -549,41 +747,46 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
|
|||
int main_rssi = rs->rs_rssi_ctl0;
|
||||
int alt_rssi = rs->rs_rssi_ctl1;
|
||||
int rx_ant_conf, main_ant_conf;
|
||||
bool short_scan = false;
|
||||
bool short_scan = false, ret;
|
||||
|
||||
rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
|
||||
ATH_ANT_RX_MASK;
|
||||
main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
|
||||
ATH_ANT_RX_MASK;
|
||||
|
||||
if (alt_rssi >= antcomb->low_rssi_thresh) {
|
||||
antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
|
||||
antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2;
|
||||
} else {
|
||||
antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI;
|
||||
antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI;
|
||||
}
|
||||
|
||||
/* Record packet only when both main_rssi and alt_rssi is positive */
|
||||
if (main_rssi > 0 && alt_rssi > 0) {
|
||||
antcomb->total_pkt_count++;
|
||||
antcomb->main_total_rssi += main_rssi;
|
||||
antcomb->alt_total_rssi += alt_rssi;
|
||||
|
||||
if (main_ant_conf == rx_ant_conf)
|
||||
antcomb->main_recv_cnt++;
|
||||
else
|
||||
antcomb->alt_recv_cnt++;
|
||||
}
|
||||
|
||||
/* Short scan check */
|
||||
if (antcomb->scan && antcomb->alt_good) {
|
||||
if (time_after(jiffies, antcomb->scan_start_time +
|
||||
msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
|
||||
short_scan = true;
|
||||
else
|
||||
if (antcomb->total_pkt_count ==
|
||||
ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
|
||||
alt_ratio = ((antcomb->alt_recv_cnt * 100) /
|
||||
antcomb->total_pkt_count);
|
||||
if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
|
||||
short_scan = true;
|
||||
}
|
||||
if (main_ant_conf == rx_ant_conf) {
|
||||
ANT_STAT_INC(ANT_MAIN, recv_cnt);
|
||||
ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
|
||||
} else {
|
||||
ANT_STAT_INC(ANT_ALT, recv_cnt);
|
||||
ANT_LNA_INC(ANT_ALT, rx_ant_conf);
|
||||
}
|
||||
|
||||
/* Short scan check */
|
||||
short_scan = ath_ant_short_scan_check(antcomb);
|
||||
|
||||
if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
|
||||
rs->rs_moreaggr) && !short_scan)
|
||||
rs->rs_moreaggr) && !short_scan)
|
||||
return;
|
||||
|
||||
if (antcomb->total_pkt_count) {
|
||||
|
@ -595,15 +798,13 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
|
|||
antcomb->total_pkt_count);
|
||||
}
|
||||
|
||||
|
||||
ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
|
||||
curr_alt_set = div_ant_conf.alt_lna_conf;
|
||||
curr_main_set = div_ant_conf.main_lna_conf;
|
||||
|
||||
antcomb->count++;
|
||||
|
||||
if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
|
||||
if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
|
||||
if (alt_ratio > antcomb->ant_ratio) {
|
||||
ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
|
||||
main_rssi_avg);
|
||||
antcomb->alt_good = true;
|
||||
|
@ -617,153 +818,47 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
|
|||
}
|
||||
|
||||
if (!antcomb->scan) {
|
||||
if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
|
||||
alt_ratio, curr_main_set, curr_alt_set,
|
||||
alt_rssi_avg, main_rssi_avg)) {
|
||||
if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
|
||||
/* Switch main and alt LNA */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
}
|
||||
|
||||
goto div_comb_done;
|
||||
} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
|
||||
(curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
|
||||
/* Set alt to another LNA */
|
||||
if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
|
||||
goto div_comb_done;
|
||||
}
|
||||
|
||||
if ((alt_rssi_avg < (main_rssi_avg +
|
||||
div_ant_conf.lna1_lna2_delta)))
|
||||
ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio,
|
||||
alt_rssi_avg, main_rssi_avg,
|
||||
curr_main_set, curr_alt_set);
|
||||
if (ret)
|
||||
goto div_comb_done;
|
||||
}
|
||||
|
||||
if (!antcomb->scan &&
|
||||
(alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta)))
|
||||
goto div_comb_done;
|
||||
|
||||
if (!antcomb->scan_not_start) {
|
||||
switch (curr_alt_set) {
|
||||
case ATH_ANT_DIV_COMB_LNA2:
|
||||
antcomb->rssi_lna2 = alt_rssi_avg;
|
||||
antcomb->rssi_lna1 = main_rssi_avg;
|
||||
antcomb->scan = true;
|
||||
/* set to A+B */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1:
|
||||
antcomb->rssi_lna1 = alt_rssi_avg;
|
||||
antcomb->rssi_lna2 = main_rssi_avg;
|
||||
antcomb->scan = true;
|
||||
/* set to A+B */
|
||||
div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
|
||||
antcomb->rssi_add = alt_rssi_avg;
|
||||
antcomb->scan = true;
|
||||
/* set to A-B */
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
|
||||
break;
|
||||
case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
|
||||
antcomb->rssi_sub = alt_rssi_avg;
|
||||
antcomb->scan = false;
|
||||
if (antcomb->rssi_lna2 >
|
||||
(antcomb->rssi_lna1 +
|
||||
ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
|
||||
/* use LNA2 as main LNA */
|
||||
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
|
||||
(antcomb->rssi_add > antcomb->rssi_sub)) {
|
||||
/* set to A+B */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
} else if (antcomb->rssi_sub >
|
||||
antcomb->rssi_lna1) {
|
||||
/* set to A-B */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
|
||||
} else {
|
||||
/* set to LNA1 */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
}
|
||||
} else {
|
||||
/* use LNA1 as main LNA */
|
||||
if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
|
||||
(antcomb->rssi_add > antcomb->rssi_sub)) {
|
||||
/* set to A+B */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
|
||||
} else if (antcomb->rssi_sub >
|
||||
antcomb->rssi_lna1) {
|
||||
/* set to A-B */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
|
||||
} else {
|
||||
/* set to LNA2 */
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set,
|
||||
alt_rssi_avg, main_rssi_avg);
|
||||
} else {
|
||||
if (!antcomb->alt_good) {
|
||||
antcomb->scan_not_start = false;
|
||||
/* Set alt to another LNA */
|
||||
if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
|
||||
div_ant_conf.main_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
ATH_ANT_DIV_COMB_LNA1;
|
||||
div_ant_conf.alt_lna_conf =
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
ATH_ANT_DIV_COMB_LNA2;
|
||||
}
|
||||
goto div_comb_done;
|
||||
}
|
||||
ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
alt_ratio);
|
||||
antcomb->quick_scan_cnt++;
|
||||
}
|
||||
|
||||
ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
|
||||
main_rssi_avg, alt_rssi_avg,
|
||||
alt_ratio);
|
||||
|
||||
antcomb->quick_scan_cnt++;
|
||||
|
||||
div_comb_done:
|
||||
ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
|
||||
ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
|
||||
ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg);
|
||||
|
||||
antcomb->scan_start_time = jiffies;
|
||||
antcomb->total_pkt_count = 0;
|
||||
|
@@ -772,26 +867,3 @@ div_comb_done:
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

void ath_ant_comb_update(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_hw_antcomb_conf div_ant_conf;
	u8 lna_conf;

	ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);

	if (sc->ant_rx == 1)
		lna_conf = ATH_ANT_DIV_COMB_LNA1;
	else
		lna_conf = ATH_ANT_DIV_COMB_LNA2;

	div_ant_conf.main_lna_conf = lna_conf;
	div_ant_conf.alt_lna_conf = lna_conf;

	ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);

	if (common->antenna_diversity)
		ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
}
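ath_ant_comb_update() above forces both the main and alt slots onto whichever LNA matches the user's fixed antenna selection before writing the configuration back to hardware. A minimal sketch of that mapping, with a hypothetical enum and config struct standing in for the ath9k types:

enum demo_lna_conf { DEMO_LNA1_MINUS_LNA2, DEMO_LNA2, DEMO_LNA1, DEMO_LNA1_PLUS_LNA2 };

struct demo_antcomb_conf {
	enum demo_lna_conf main_lna_conf;
	enum demo_lna_conf alt_lna_conf;
};

/* Illustrative only: with diversity disabled, main and alt use the same LNA. */
static void demo_fixed_antenna(struct demo_antcomb_conf *conf, int ant_rx)
{
	enum demo_lna_conf lna = (ant_rx == 1) ? DEMO_LNA1 : DEMO_LNA2;

	conf->main_lna_conf = lna;
	conf->alt_lna_conf = lna;
}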
|
|
|
@ -555,6 +555,69 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
|
|||
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
|
||||
static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
|
||||
{
|
||||
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
|
||||
u8 antdiv_ctrl1, antdiv_ctrl2;
|
||||
u32 regval;
|
||||
|
||||
if (enable) {
|
||||
antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE;
|
||||
antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE;
|
||||
|
||||
/*
|
||||
* Don't disable BT ant to allow BB to control SWCOM.
|
||||
*/
|
||||
btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT));
|
||||
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
|
||||
|
||||
REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM);
|
||||
REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
|
||||
} else {
|
||||
/*
|
||||
* Disable antenna diversity, use LNA1 only.
|
||||
*/
|
||||
antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A;
|
||||
antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A;
|
||||
|
||||
/*
|
||||
* Disable BT Ant. to allow concurrent BT and WLAN receive.
|
||||
*/
|
||||
btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT;
|
||||
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
|
||||
|
||||
/*
|
||||
* Program SWCOM table to make sure RF switch always parks
|
||||
* at BT side.
|
||||
*/
|
||||
REG_WRITE(ah, AR_PHY_SWITCH_COM, 0);
|
||||
REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
|
||||
}
|
||||
|
||||
regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
|
||||
regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
|
||||
/*
|
||||
* Clear ant_fast_div_bias [14:9] since for WB195,
|
||||
* the main LNA is always LNA1.
|
||||
*/
|
||||
regval &= (~(AR_PHY_9285_FAST_DIV_BIAS));
|
||||
regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL);
|
||||
regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF);
|
||||
regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
|
||||
regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB);
|
||||
regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
|
||||
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
|
||||
|
||||
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
|
||||
regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
|
||||
regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
|
||||
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
|
||||
struct ath_spec_scan *param)
|
||||
{
|
||||
|
@ -634,5 +697,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
|
|||
ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
|
||||
ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
|
||||
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
|
||||
#endif
|
||||
|
||||
ar9002_hw_set_nf_limits(ah);
|
||||
}
|
||||
|
|
|
@@ -317,13 +317,15 @@
#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S	29
#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB		0x40000000
#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S	30
#define AR_PHY_9285_ANT_DIV_LNA1		2
#define AR_PHY_9285_ANT_DIV_LNA2		1
#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2	3
#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2	0
#define AR_PHY_9285_ANT_DIV_GAINTB_0		0
#define AR_PHY_9285_ANT_DIV_GAINTB_1		1

#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE	0x0b
#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE	0x09
#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A	0x04
#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A	0x09
#define ATH_BT_COEX_ANT_DIV_SWITCH_COM		0x66666666

#define AR_PHY_EXT_CCA0				0x99b8
#define AR_PHY_EXT_CCA0_THRESH62		0x000000FF
#define AR_PHY_EXT_CCA0_THRESH62_S		0
|
|
|
@@ -3541,13 +3541,12 @@ static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
	return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
}

static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
	return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
}

static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
{
	return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
}
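These getters lose their static qualifier so the new BT antenna diversity code elsewhere in the ar9003 layer can read the COM2 switch-table value from the EEPROM. A sketch of the masking pattern that caller applies to the returned value; the mask and helper name here are placeholders for illustration, not the real AR_SWITCH_TABLE_COM2_ALL definition:

#include <stdint.h>

/* Placeholder mask for illustration; the real AR_SWITCH_TABLE_COM2_ALL differs. */
#define DEMO_SWITCH_TABLE_COM2_ALL	0x00ffffff

static uint32_t demo_com2_switch_value(uint32_t ant_ctrl_common2,
				       int bt_ant_diversity,
				       uint32_t switch_override)
{
	uint32_t value = ant_ctrl_common2;

	if (bt_ant_diversity) {
		/* Swap in the platform's 2 GHz switch-table override. */
		value &= ~DEMO_SWITCH_TABLE_COM2_ALL;
		value |= switch_override;
	}
	return value;
}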
|
@ -3561,6 +3560,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
|
|||
|
||||
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
int chain;
|
||||
u32 regval, value, gpio;
|
||||
|
@ -3614,6 +3614,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
|||
}
|
||||
|
||||
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
|
||||
if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
|
||||
regval &= ~AR_SWITCH_TABLE_COM2_ALL;
|
||||
regval |= ah->config.ant_ctrl_comm2g_switch_enable;
|
||||
|
||||
}
|
||||
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
|
||||
|
||||
if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
|
||||
|
@ -3645,8 +3650,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
|||
regval &= (~AR_PHY_ANT_DIV_LNADIV);
|
||||
regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
|
||||
|
||||
if (AR_SREV_9485(ah) && common->bt_ant_diversity)
|
||||
regval |= AR_ANT_DIV_ENABLE;
|
||||
|
||||
if (AR_SREV_9565(ah)) {
|
||||
if (ah->shared_chain_lnadiv) {
|
||||
if (common->bt_ant_diversity) {
|
||||
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
|
||||
} else {
|
||||
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
|
||||
|
@ -3656,10 +3664,14 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
|||
|
||||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
|
||||
/*enable fast_div */
|
||||
/* enable fast_div */
|
||||
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
|
||||
regval &= (~AR_FAST_DIV_ENABLE);
|
||||
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
|
||||
|
||||
if (AR_SREV_9485(ah) && common->bt_ant_diversity)
|
||||
regval |= AR_FAST_DIV_ENABLE;
|
||||
|
||||
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
|
||||
|
||||
if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
|
||||
|
@ -3673,9 +3685,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
|||
AR_PHY_ANT_DIV_ALT_GAINTB |
|
||||
AR_PHY_ANT_DIV_MAIN_GAINTB));
|
||||
/* by default use LNA1 for the main antenna */
|
||||
regval |= (AR_PHY_ANT_DIV_LNA1 <<
|
||||
regval |= (ATH_ANT_DIV_COMB_LNA1 <<
|
||||
AR_PHY_ANT_DIV_MAIN_LNACONF_S);
|
||||
regval |= (AR_PHY_ANT_DIV_LNA2 <<
|
||||
regval |= (ATH_ANT_DIV_COMB_LNA2 <<
|
||||
AR_PHY_ANT_DIV_ALT_LNACONF_S);
|
||||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
}
|
||||
|
|
|
@ -334,6 +334,8 @@ struct ar9300_eeprom {
|
|||
|
||||
s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
|
||||
s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
|
||||
u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz);
|
||||
u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
|
||||
|
||||
u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
|
||||
|
||||
|
|
|
@ -632,6 +632,22 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
|
|||
|
||||
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
|
||||
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
|
||||
|
||||
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
|
||||
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
|
||||
AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
|
||||
|
||||
if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
|
||||
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
|
||||
ah->enabled_cals |= TX_IQ_CAL;
|
||||
else
|
||||
ah->enabled_cals &= ~TX_IQ_CAL;
|
||||
|
||||
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
|
||||
ah->enabled_cals |= TX_CL_CAL;
|
||||
else
|
||||
ah->enabled_cals &= ~TX_CL_CAL;
|
||||
}
|
||||
}
|
||||
|
||||
static void ar9003_hw_prog_ini(struct ath_hw *ah,
|
||||
|
@ -814,29 +830,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
|
|||
if (chan->channel == 2484)
|
||||
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
|
||||
|
||||
if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
|
||||
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
|
||||
AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
|
||||
|
||||
ah->modes_index = modesIndex;
|
||||
ar9003_hw_override_ini(ah);
|
||||
ar9003_hw_set_channel_regs(ah, chan);
|
||||
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
|
||||
ath9k_hw_apply_txpower(ah, chan, false);
|
||||
|
||||
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
|
||||
if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
|
||||
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
|
||||
ah->enabled_cals |= TX_IQ_CAL;
|
||||
else
|
||||
ah->enabled_cals &= ~TX_IQ_CAL;
|
||||
|
||||
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
|
||||
ah->enabled_cals |= TX_CL_CAL;
|
||||
else
|
||||
ah->enabled_cals &= ~TX_CL_CAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1413,65 +1412,111 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
|
|||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
}
|
||||
|
||||
static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
|
||||
bool enable)
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
|
||||
static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
|
||||
{
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
u8 ant_div_ctl1;
|
||||
u32 regval;
|
||||
|
||||
if (!AR_SREV_9565(ah))
|
||||
if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah))
|
||||
return;
|
||||
|
||||
ah->shared_chain_lnadiv = enable;
|
||||
if (AR_SREV_9485(ah)) {
|
||||
regval = ar9003_hw_ant_ctrl_common_2_get(ah,
|
||||
IS_CHAN_2GHZ(ah->curchan));
|
||||
if (enable) {
|
||||
regval &= ~AR_SWITCH_TABLE_COM2_ALL;
|
||||
regval |= ah->config.ant_ctrl_comm2g_switch_enable;
|
||||
}
|
||||
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2,
|
||||
AR_SWITCH_TABLE_COM2_ALL, regval);
|
||||
}
|
||||
|
||||
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
|
||||
|
||||
/*
|
||||
* Set MAIN/ALT LNA conf.
|
||||
* Set MAIN/ALT gain_tb.
|
||||
*/
|
||||
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
|
||||
regval &= (~AR_ANT_DIV_CTRL_ALL);
|
||||
regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
|
||||
regval &= ~AR_PHY_ANT_DIV_LNADIV;
|
||||
regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
|
||||
|
||||
if (enable)
|
||||
regval |= AR_ANT_DIV_ENABLE;
|
||||
|
||||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
|
||||
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
|
||||
regval &= ~AR_FAST_DIV_ENABLE;
|
||||
regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
|
||||
|
||||
if (enable)
|
||||
regval |= AR_FAST_DIV_ENABLE;
|
||||
|
||||
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
|
||||
|
||||
if (enable) {
|
||||
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
|
||||
(1 << AR_PHY_ANT_SW_RX_PROT_S));
|
||||
if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
|
||||
REG_SET_BIT(ah, AR_PHY_RESTART,
|
||||
AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
|
||||
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
|
||||
AR_BTCOEX_WL_LNADIV_FORCE_ON);
|
||||
} else {
|
||||
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
|
||||
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
|
||||
(1 << AR_PHY_ANT_SW_RX_PROT_S));
|
||||
REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
|
||||
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
|
||||
AR_BTCOEX_WL_LNADIV_FORCE_ON);
|
||||
|
||||
if (AR_SREV_9485_11(ah)) {
|
||||
/*
|
||||
* Enable LNA diversity.
|
||||
*/
|
||||
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
|
||||
regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
|
||||
AR_PHY_ANT_DIV_ALT_LNACONF |
|
||||
AR_PHY_ANT_DIV_MAIN_GAINTB |
|
||||
AR_PHY_ANT_DIV_ALT_GAINTB);
|
||||
regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
|
||||
regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
|
||||
regval &= ~AR_PHY_ANT_DIV_LNADIV;
|
||||
regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
|
||||
if (enable)
|
||||
regval |= AR_ANT_DIV_ENABLE;
|
||||
|
||||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
|
||||
/*
|
||||
* Enable fast antenna diversity.
|
||||
*/
|
||||
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
|
||||
regval &= ~AR_FAST_DIV_ENABLE;
|
||||
regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
|
||||
if (enable)
|
||||
regval |= AR_FAST_DIV_ENABLE;
|
||||
|
||||
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
|
||||
|
||||
if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
|
||||
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
|
||||
regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
|
||||
AR_PHY_ANT_DIV_ALT_LNACONF |
|
||||
AR_PHY_ANT_DIV_ALT_GAINTB |
|
||||
AR_PHY_ANT_DIV_MAIN_GAINTB));
|
||||
/*
|
||||
* Set MAIN to LNA1 and ALT to LNA2 at the
|
||||
* beginning.
|
||||
*/
|
||||
regval |= (ATH_ANT_DIV_COMB_LNA1 <<
|
||||
AR_PHY_ANT_DIV_MAIN_LNACONF_S);
|
||||
regval |= (ATH_ANT_DIV_COMB_LNA2 <<
|
||||
AR_PHY_ANT_DIV_ALT_LNACONF_S);
|
||||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
}
|
||||
} else if (AR_SREV_9565(ah)) {
|
||||
if (enable) {
|
||||
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
|
||||
(1 << AR_PHY_ANT_SW_RX_PROT_S));
|
||||
if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
|
||||
REG_SET_BIT(ah, AR_PHY_RESTART,
|
||||
AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
|
||||
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
|
||||
AR_BTCOEX_WL_LNADIV_FORCE_ON);
|
||||
} else {
|
||||
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
|
||||
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
|
||||
(1 << AR_PHY_ANT_SW_RX_PROT_S));
|
||||
REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
|
||||
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
|
||||
AR_BTCOEX_WL_LNADIV_FORCE_ON);
|
||||
|
||||
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
|
||||
regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
|
||||
AR_PHY_ANT_DIV_ALT_LNACONF |
|
||||
AR_PHY_ANT_DIV_MAIN_GAINTB |
|
||||
AR_PHY_ANT_DIV_ALT_GAINTB);
|
||||
regval |= (ATH_ANT_DIV_COMB_LNA1 <<
|
||||
AR_PHY_ANT_DIV_MAIN_LNACONF_S);
|
||||
regval |= (ATH_ANT_DIV_COMB_LNA2 <<
|
||||
AR_PHY_ANT_DIV_ALT_LNACONF_S);
|
||||
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
|
||||
struct ath9k_channel *chan,
|
||||
u8 *ini_reloaded)
|
||||
|
@ -1518,6 +1563,18 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
|
|||
|
||||
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
|
||||
|
||||
if (AR_SREV_9462_20_OR_LATER(ah)) {
|
||||
/*
|
||||
* CUS217 mix LNA mode.
|
||||
*/
|
||||
if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
|
||||
REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
|
||||
1, regWrites);
|
||||
REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
|
||||
modesIndex, regWrites);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* For 5GHz channels requiring Fast Clock, apply
|
||||
* different modal values.
|
||||
|
@ -1528,7 +1585,11 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
|
|||
if (AR_SREV_9565(ah))
|
||||
REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
|
||||
|
||||
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
|
||||
/*
|
||||
* JAPAN regulatory.
|
||||
*/
|
||||
if (chan->channel == 2484)
|
||||
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
|
||||
|
||||
ah->modes_index = modesIndex;
|
||||
*ini_reloaded = true;
|
||||
|
@ -1631,11 +1692,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
|
|||
|
||||
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
|
||||
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
|
||||
ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
|
||||
ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
|
||||
ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
|
||||
ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
|
||||
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
|
||||
#endif
|
||||
|
||||
ar9003_hw_set_nf_limits(ah);
|
||||
ar9003_hw_set_radar_conf(ah);
|
||||
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
|
||||
|
|
|
@ -296,11 +296,6 @@
|
|||
#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
|
||||
#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
|
||||
|
||||
#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
|
||||
#define AR_PHY_ANT_DIV_LNA2 0x1
|
||||
#define AR_PHY_ANT_DIV_LNA1 0x2
|
||||
#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
|
||||
|
||||
#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
|
||||
#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
|
||||
#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
|
||||
|
|
|
@ -137,7 +137,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
|
|||
#define ATH_AGGR_ENCRYPTDELIM 10
|
||||
/* minimum h/w qdepth to be sustained to maximize aggregation */
|
||||
#define ATH_AGGR_MIN_QDEPTH 2
|
||||
#define ATH_AMPDU_SUBFRAME_DEFAULT 32
|
||||
/* minimum h/w qdepth for non-aggregated traffic */
|
||||
#define ATH_NON_AGGR_MIN_QDEPTH 8
|
||||
|
||||
#define IEEE80211_SEQ_SEQ_SHIFT 4
|
||||
#define IEEE80211_SEQ_MAX 4096
|
||||
|
@ -174,12 +175,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
|
|||
|
||||
#define ATH_TX_COMPLETE_POLL_INT 1000
|
||||
|
||||
enum ATH_AGGR_STATUS {
|
||||
ATH_AGGR_DONE,
|
||||
ATH_AGGR_BAW_CLOSED,
|
||||
ATH_AGGR_LIMITED,
|
||||
};
|
||||
|
||||
#define ATH_TXFIFO_DEPTH 8
|
||||
struct ath_txq {
|
||||
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
|
||||
|
@ -212,8 +207,9 @@ struct ath_frame_info {
|
|||
int framelen;
|
||||
enum ath9k_key_type keytype;
|
||||
u8 keyix;
|
||||
u8 retries;
|
||||
u8 rtscts_rate;
|
||||
u8 retries : 7;
|
||||
u8 baw_tracked : 1;
|
||||
};
|
||||
|
||||
struct ath_buf_state {
|
||||
|
@ -241,6 +237,7 @@ struct ath_buf {
|
|||
struct ath_atx_tid {
|
||||
struct list_head list;
|
||||
struct sk_buff_head buf_q;
|
||||
struct sk_buff_head retry_q;
|
||||
struct ath_node *an;
|
||||
struct ath_atx_ac *ac;
|
||||
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
|
||||
|
@ -268,6 +265,7 @@ struct ath_node {
|
|||
u8 mpdudensity;
|
||||
|
||||
bool sleeping;
|
||||
bool no_ps_filter;
|
||||
|
||||
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
|
||||
struct dentry *node_stat;
|
||||
|
@ -367,6 +365,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
|
|||
/********/
|
||||
|
||||
struct ath_vif {
|
||||
struct ath_node mcast_node;
|
||||
int av_bslot;
|
||||
bool primary_sta_vif;
|
||||
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
|
||||
|
@@ -585,19 +584,14 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_MAX_COUNT		100
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO		30
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2		20
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI	50
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50

#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA	-1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI		-4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID		-2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW		2

enum ath9k_ant_div_comb_lna_conf {
	ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
	ATH_ANT_DIV_COMB_LNA2,
	ATH_ANT_DIV_COMB_LNA1,
	ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
};

struct ath_ant_comb {
	u16 count;
	u16 total_pkt_count;
|
@ -614,27 +608,35 @@ struct ath_ant_comb {
|
|||
int rssi_first;
|
||||
int rssi_second;
|
||||
int rssi_third;
|
||||
int ant_ratio;
|
||||
int ant_ratio2;
|
||||
bool alt_good;
|
||||
int quick_scan_cnt;
|
||||
int main_conf;
|
||||
enum ath9k_ant_div_comb_lna_conf main_conf;
|
||||
enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
|
||||
enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
|
||||
bool first_ratio;
|
||||
bool second_ratio;
|
||||
unsigned long scan_start_time;
|
||||
|
||||
/*
|
||||
* Card-specific config values.
|
||||
*/
|
||||
int low_rssi_thresh;
|
||||
int fast_div_bias;
|
||||
};
|
||||
|
||||
void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
|
||||
void ath_ant_comb_update(struct ath_softc *sc);
|
||||
|
||||
/********************/
|
||||
/* Main driver core */
|
||||
/********************/
|
||||
|
||||
#define ATH9K_PCI_CUS198 0x0001
|
||||
#define ATH9K_PCI_CUS230 0x0002
|
||||
#define ATH9K_PCI_CUS217 0x0004
|
||||
#define ATH9K_PCI_WOW 0x0008
|
||||
#define ATH9K_PCI_CUS198 0x0001
|
||||
#define ATH9K_PCI_CUS230 0x0002
|
||||
#define ATH9K_PCI_CUS217 0x0004
|
||||
#define ATH9K_PCI_WOW 0x0008
|
||||
#define ATH9K_PCI_BT_ANT_DIV 0x0010
|
||||
|
||||
/*
|
||||
* Default cache line size, in bytes.
|
||||
|
|
|
@ -270,25 +270,29 @@ static const struct file_operations fops_ani = {
|
|||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
|
||||
static ssize_t read_file_bt_ant_diversity(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath_softc *sc = file->private_data;
|
||||
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
|
||||
char buf[32];
|
||||
unsigned int len;
|
||||
|
||||
len = sprintf(buf, "%d\n", common->antenna_diversity);
|
||||
len = sprintf(buf, "%d\n", common->bt_ant_diversity);
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
static ssize_t write_file_ant_diversity(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
static ssize_t write_file_bt_ant_diversity(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath_softc *sc = file->private_data;
|
||||
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
|
||||
unsigned long antenna_diversity;
|
||||
struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
|
||||
unsigned long bt_ant_diversity;
|
||||
char buf[32];
|
||||
ssize_t len;
|
||||
|
||||
|
@ -296,26 +300,147 @@ static ssize_t write_file_ant_diversity(struct file *file,
|
|||
if (copy_from_user(buf, user_buf, len))
|
||||
return -EFAULT;
|
||||
|
||||
if (!AR_SREV_9565(sc->sc_ah))
|
||||
if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
|
||||
goto exit;
|
||||
|
||||
buf[len] = '\0';
|
||||
if (kstrtoul(buf, 0, &antenna_diversity))
|
||||
if (kstrtoul(buf, 0, &bt_ant_diversity))
|
||||
return -EINVAL;
|
||||
|
||||
common->antenna_diversity = !!antenna_diversity;
|
||||
common->bt_ant_diversity = !!bt_ant_diversity;
|
||||
ath9k_ps_wakeup(sc);
|
||||
ath_ant_comb_update(sc);
|
||||
ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
|
||||
common->antenna_diversity);
|
||||
ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
|
||||
ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
|
||||
common->bt_ant_diversity);
|
||||
ath9k_ps_restore(sc);
|
||||
exit:
|
||||
return count;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_ant_diversity = {
|
||||
.read = read_file_ant_diversity,
|
||||
.write = write_file_ant_diversity,
|
||||
static const struct file_operations fops_bt_ant_diversity = {
|
||||
.read = read_file_bt_ant_diversity,
|
||||
.write = write_file_bt_ant_diversity,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
void ath9k_debug_stat_ant(struct ath_softc *sc,
|
||||
struct ath_hw_antcomb_conf *div_ant_conf,
|
||||
int main_rssi_avg, int alt_rssi_avg)
|
||||
{
|
||||
struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
|
||||
struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
|
||||
|
||||
as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
|
||||
as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;
|
||||
|
||||
as_main->rssi_avg = main_rssi_avg;
|
||||
as_alt->rssi_avg = alt_rssi_avg;
|
||||
}
|
||||
|
||||
static ssize_t read_file_antenna_diversity(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath_softc *sc = file->private_data;
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
|
||||
struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
|
||||
struct ath_hw_antcomb_conf div_ant_conf;
|
||||
unsigned int len = 0, size = 1024;
|
||||
ssize_t retval = 0;
|
||||
char *buf;
|
||||
char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
|
||||
"LNA2",
|
||||
"LNA1",
|
||||
"LNA1_PLUS_LNA2"};
|
||||
|
||||
buf = kzalloc(size, GFP_KERNEL);
|
||||
if (buf == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
|
||||
len += snprintf(buf + len, size - len, "%s\n",
|
||||
"Antenna Diversity Combining is disabled");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ath9k_ps_wakeup(sc);
|
||||
ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
|
||||
len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
|
||||
lna_conf_str[div_ant_conf.main_lna_conf]);
|
||||
len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
|
||||
lna_conf_str[div_ant_conf.alt_lna_conf]);
|
||||
len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
|
||||
as_main->rssi_avg);
|
||||
len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
|
||||
as_alt->rssi_avg);
|
||||
ath9k_ps_restore(sc);
|
||||
|
||||
len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
|
||||
len += snprintf(buf + len, size - len, "-------------------\n");
|
||||
|
||||
len += snprintf(buf + len, size - len, "%30s%15s\n",
|
||||
"MAIN", "ALT");
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"TOTAL COUNT",
|
||||
as_main->recv_cnt,
|
||||
as_alt->recv_cnt);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA1",
|
||||
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
|
||||
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA2",
|
||||
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
|
||||
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA1 + LNA2",
|
||||
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
|
||||
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA1 - LNA2",
|
||||
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
|
||||
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
|
||||
|
||||
len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
|
||||
len += snprintf(buf + len, size - len, "--------------------\n");
|
||||
|
||||
len += snprintf(buf + len, size - len, "%30s%15s\n",
|
||||
"MAIN", "ALT");
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA1",
|
||||
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
|
||||
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA2",
|
||||
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
|
||||
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA1 + LNA2",
|
||||
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
|
||||
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
|
||||
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
|
||||
"LNA1 - LNA2",
|
||||
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
|
||||
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
|
||||
|
||||
exit:
|
||||
if (len > size)
|
||||
len = size;
|
||||
|
||||
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
kfree(buf);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_antenna_diversity = {
|
||||
.read = read_file_antenna_diversity,
|
||||
.open = simple_open,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = default_llseek,
|
||||
|
@ -607,6 +732,28 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
|
|||
return retval;
|
||||
}
|
||||
|
||||
static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
|
||||
char *buf, ssize_t size)
|
||||
{
|
||||
ssize_t len = 0;
|
||||
|
||||
ath_txq_lock(sc, txq);
|
||||
|
||||
len += snprintf(buf + len, size - len, "%s: %d ",
|
||||
"qnum", txq->axq_qnum);
|
||||
len += snprintf(buf + len, size - len, "%s: %2d ",
|
||||
"qdepth", txq->axq_depth);
|
||||
len += snprintf(buf + len, size - len, "%s: %2d ",
|
||||
"ampdu-depth", txq->axq_ampdu_depth);
|
||||
len += snprintf(buf + len, size - len, "%s: %3d ",
|
||||
"pending", txq->pending_frames);
|
||||
len += snprintf(buf + len, size - len, "%s: %d\n",
|
||||
"stopped", txq->stopped);
|
||||
|
||||
ath_txq_unlock(sc, txq);
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t read_file_queues(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
|
@ -624,24 +771,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
|
|||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
txq = sc->tx.txq_map[i];
|
||||
len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
|
||||
|
||||
ath_txq_lock(sc, txq);
|
||||
|
||||
len += snprintf(buf + len, size - len, "%s: %d ",
|
||||
"qnum", txq->axq_qnum);
|
||||
len += snprintf(buf + len, size - len, "%s: %2d ",
|
||||
"qdepth", txq->axq_depth);
|
||||
len += snprintf(buf + len, size - len, "%s: %2d ",
|
||||
"ampdu-depth", txq->axq_ampdu_depth);
|
||||
len += snprintf(buf + len, size - len, "%s: %3d ",
|
||||
"pending", txq->pending_frames);
|
||||
len += snprintf(buf + len, size - len, "%s: %d\n",
|
||||
"stopped", txq->stopped);
|
||||
|
||||
ath_txq_unlock(sc, txq);
|
||||
len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
|
||||
len += print_queue(sc, txq, buf + len, size - len);
|
||||
}
|
||||
|
||||
len += snprintf(buf + len, size - len, "(CAB): ");
|
||||
len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
|
||||
|
||||
if (len > size)
|
||||
len = size;
|
||||
|
||||
|
@ -1814,9 +1950,11 @@ int ath9k_init_debug(struct ath_hw *ah)
|
|||
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
|
||||
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
|
||||
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
|
||||
debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
|
||||
sc->debug.debugfs_phy, sc, &fops_ant_diversity);
|
||||
debugfs_create_file("antenna_diversity", S_IRUSR,
|
||||
sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
|
||||
sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
|
||||
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
|
||||
&fops_btcoex);
|
||||
#endif
|
||||
|
|
|
@@ -28,9 +28,13 @@ struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
#else
#define TX_STAT_INC(q, c) do { } while (0)
#define RESET_STAT_INC(sc, type) do { } while (0)
#define ANT_STAT_INC(i, c) do { } while (0)
#define ANT_LNA_INC(i, c) do { } while (0)
#endif

enum ath_reset_type {
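The new ANT_STAT_INC/ANT_LNA_INC macros above follow the usual debugfs pattern: a real counter increment when CONFIG_ATH9K_DEBUGFS is set and an empty do-while stub otherwise, so call sites need no ifdefs. A compilable toy sketch of the same pattern, with a hypothetical counter and config symbol:

#include <stdio.h>

struct demo_stats {
	unsigned int recv_cnt;
};

static struct demo_stats demo_ant_stats[2];

#ifdef DEMO_DEBUGFS
#define DEMO_ANT_STAT_INC(i) demo_ant_stats[i].recv_cnt++
#else
/* Compiles to nothing when statistics are disabled. */
#define DEMO_ANT_STAT_INC(i) do { } while (0)
#endif

int main(void)
{
	DEMO_ANT_STAT_INC(0);
	printf("main antenna frames: %u\n", demo_ant_stats[0].recv_cnt);
	return 0;
}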
|
@ -243,11 +247,22 @@ struct ath_rx_stats {
|
|||
u32 rx_spectral;
|
||||
};
|
||||
|
||||
#define ANT_MAIN 0
|
||||
#define ANT_ALT 1
|
||||
|
||||
struct ath_antenna_stats {
|
||||
u32 recv_cnt;
|
||||
u32 rssi_avg;
|
||||
u32 lna_recv_cnt[4];
|
||||
u32 lna_attempt_cnt[4];
|
||||
};
|
||||
|
||||
struct ath_stats {
|
||||
struct ath_interrupt_stats istats;
|
||||
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
|
||||
struct ath_rx_stats rxstats;
|
||||
struct ath_dfs_stats dfs_stats;
|
||||
struct ath_antenna_stats ant_stats[2];
|
||||
u32 reset[__RESET_TYPE_MAX];
|
||||
};
|
||||
|
||||
|
@ -281,10 +296,11 @@ void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
|
|||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta,
|
||||
struct dentry *dir);
|
||||
|
||||
void ath_debug_send_fft_sample(struct ath_softc *sc,
|
||||
struct fft_sample_tlv *fft_sample);
|
||||
|
||||
void ath9k_debug_stat_ant(struct ath_softc *sc,
|
||||
struct ath_hw_antcomb_conf *div_ant_conf,
|
||||
int main_rssi_avg, int alt_rssi_avg);
|
||||
#else
|
||||
|
||||
#define RX_STAT_INC(c) /* NOP */
|
||||
|
@ -297,12 +313,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
|
|||
static inline void ath9k_deinit_debug(struct ath_softc *sc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
|
||||
enum ath9k_int status)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ath_debug_stat_tx(struct ath_softc *sc,
|
||||
struct ath_buf *bf,
|
||||
struct ath_tx_status *ts,
|
||||
|
@ -310,10 +324,15 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
|
|||
unsigned int flags)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ath_debug_stat_rx(struct ath_softc *sc,
|
||||
struct ath_rx_status *rs)
|
||||
{
|
||||
}
|
||||
static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
|
||||
struct ath_hw_antcomb_conf *div_ant_conf,
|
||||
int main_rssi_avg, int alt_rssi_avg)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
#endif /* CONFIG_ATH9K_DEBUGFS */
|
||||
|
|
|
@ -812,6 +812,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
|
|||
static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
|
||||
struct ath9k_channel *chan)
|
||||
{
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
struct modal_eep_4k_header *pModal;
|
||||
struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
|
||||
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
|
||||
|
@ -858,6 +859,24 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
|
|||
|
||||
REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
|
||||
regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
|
||||
|
||||
if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
|
||||
/*
|
||||
* If diversity combining is enabled,
|
||||
* set MAIN to LNA1 and ALT to LNA2 initially.
|
||||
*/
|
||||
regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
|
||||
regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
|
||||
AR_PHY_9285_ANT_DIV_ALT_LNACONF));
|
||||
|
||||
regVal |= (ATH_ANT_DIV_COMB_LNA1 <<
|
||||
AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S);
|
||||
regVal |= (ATH_ANT_DIV_COMB_LNA2 <<
|
||||
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S);
|
||||
regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS));
|
||||
regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S);
|
||||
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
|
||||
}
|
||||
}
|
||||
|
||||
if (pModal->version >= 2) {
|
||||
|
|
|
@@ -78,13 +78,16 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
	ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}

static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
							 bool enable)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT

static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
{
	if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
		ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
	if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
		ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
}

#endif

/* Private hardware call ops */

/* PHY ops */
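The wrapper above only calls through the ops table when the chip family actually installed a set_bt_ant_diversity hook, so callers never have to know which AR9xxx variant they are on. A reduced sketch of that guarded ops-table indirection, using hypothetical demo_ types:

#include <stdbool.h>
#include <stddef.h>

struct demo_hw;

struct demo_hw_ops {
	void (*set_bt_ant_diversity)(struct demo_hw *hw, bool enable);
};

struct demo_hw {
	struct demo_hw_ops ops;
};

/* Illustrative only: chips without the hook simply leave the pointer NULL. */
static void demo_set_bt_ant_diversity(struct demo_hw *hw, bool enable)
{
	if (hw->ops.set_bt_ant_diversity)
		hw->ops.set_bt_ant_diversity(hw, enable);
}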
|
|
|
@ -1496,16 +1496,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
|
|||
struct ath9k_channel *chan)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
bool band_switch = false, mode_diff = false;
|
||||
u8 ini_reloaded = 0;
|
||||
u32 qnum;
|
||||
int r;
|
||||
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
|
||||
bool band_switch, mode_diff;
|
||||
u8 ini_reloaded;
|
||||
|
||||
band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
|
||||
(ah->curchan->channelFlags & (CHANNEL_2GHZ |
|
||||
CHANNEL_5GHZ));
|
||||
mode_diff = (chan->chanmode != ah->curchan->chanmode);
|
||||
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
|
||||
u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
|
||||
u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
|
||||
band_switch = (cur != new);
|
||||
mode_diff = (chan->chanmode != ah->curchan->chanmode);
|
||||
}
|
||||
|
||||
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
|
||||
if (ath9k_hw_numtxpending(ah, qnum)) {
|
||||
|
@ -1520,11 +1522,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
|
|||
return false;
|
||||
}
|
||||
|
||||
if (edma && (band_switch || mode_diff)) {
|
||||
if (band_switch || mode_diff) {
|
||||
ath9k_hw_mark_phy_inactive(ah);
|
||||
udelay(5);
|
||||
|
||||
ath9k_hw_init_pll(ah, NULL);
|
||||
if (band_switch)
|
||||
ath9k_hw_init_pll(ah, chan);
|
||||
|
||||
if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
|
||||
ath_err(common, "Failed to do fast channel change\n");
|
||||
|
@ -1541,22 +1544,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
|
|||
}
|
||||
ath9k_hw_set_clockrate(ah);
|
||||
ath9k_hw_apply_txpower(ah, chan, false);
|
||||
ath9k_hw_rfbus_done(ah);
|
||||
|
||||
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
|
||||
ath9k_hw_set_delta_slope(ah, chan);
|
||||
|
||||
ath9k_hw_spur_mitigate_freq(ah, chan);
|
||||
|
||||
if (edma && (band_switch || mode_diff)) {
|
||||
if (band_switch || ini_reloaded)
|
||||
ah->eep_ops->set_board_values(ah, chan);
|
||||
|
||||
ath9k_hw_init_bb(ah, chan);
|
||||
ath9k_hw_rfbus_done(ah);
|
||||
|
||||
if (band_switch || ini_reloaded) {
|
||||
ah->ah_flags |= AH_FASTCC;
|
||||
if (band_switch || ini_reloaded)
|
||||
ah->eep_ops->set_board_values(ah, chan);
|
||||
|
||||
ath9k_hw_init_bb(ah, chan);
|
||||
|
||||
if (band_switch || ini_reloaded)
|
||||
ath9k_hw_init_cal(ah, chan);
|
||||
ath9k_hw_init_cal(ah, chan);
|
||||
ah->ah_flags &= ~AH_FASTCC;
|
||||
}
|
||||
|
||||
|
@ -1778,16 +1780,11 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
|
|||
/*
|
||||
* Fast channel change:
|
||||
* (Change synthesizer based on channel freq without resetting chip)
|
||||
*
|
||||
* Don't do FCC when
|
||||
* - Flag is not set
|
||||
* - Chip is just coming out of full sleep
|
||||
* - Channel to be set is same as current channel
|
||||
* - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel)
|
||||
*/
|
||||
static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
int ret;
|
||||
|
||||
if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
|
||||
|
@ -1806,9 +1803,21 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
(CHANNEL_HALF | CHANNEL_QUARTER))
|
||||
goto fail;
|
||||
|
||||
if ((chan->channelFlags & CHANNEL_ALL) !=
|
||||
(ah->curchan->channelFlags & CHANNEL_ALL))
|
||||
goto fail;
|
||||
/*
|
||||
	 * If cross-band fcc is not supported, bail out if
|
||||
* either channelFlags or chanmode differ.
|
||||
*
|
||||
* chanmode will be different if the HT operating mode
|
||||
* changes because of CSA.
|
||||
*/
|
||||
if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
|
||||
if ((chan->channelFlags & CHANNEL_ALL) !=
|
||||
(ah->curchan->channelFlags & CHANNEL_ALL))
|
||||
goto fail;
|
||||
|
||||
if (chan->chanmode != ah->curchan->chanmode)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (!ath9k_hw_check_alive(ah))
|
||||
goto fail;
|
||||
|
@ -2047,7 +2056,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
|
|||
|
||||
ath9k_hw_apply_gpio_override(ah);
|
||||
|
||||
if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
|
||||
if (AR_SREV_9565(ah) && common->bt_ant_diversity)
|
||||
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
|
||||
|
||||
return 0;
|
||||
|
@ -2504,7 +2513,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
|
|||
else
|
||||
pCap->rts_aggr_limit = (8 * 1024);
|
||||
|
||||
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
|
||||
#ifdef CONFIG_ATH9K_RFKILL
|
||||
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
|
||||
if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
|
||||
ah->rfkill_gpio =
|
||||
|
@ -2550,34 +2559,28 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
|
|||
if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
|
||||
|
||||
if (AR_SREV_9285(ah))
|
||||
if (AR_SREV_9285(ah)) {
|
||||
if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
|
||||
ant_div_ctl1 =
|
||||
ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
|
||||
if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
|
||||
if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) {
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
|
||||
ath_info(common, "Enable LNA combining\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
			pCap->hw_caps |= ATH9K_HW_CAP_APM;
	}

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
		ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
		/*
		 * enable the diversity-combining algorithm only when
		 * both enable_lna_div and enable_fast_div are set
		 *		Table for Diversity
		 * ant_div_alt_lnaconf		bit 0-1
		 * ant_div_main_lnaconf		bit 2-3
		 * ant_div_alt_gaintb		bit 4
		 * ant_div_main_gaintb		bit 5
		 * enable_ant_div_lnadiv	bit 6
		 * enable_ant_fast_div		bit 7
		 */
		if ((ant_div_ctl1 >> 0x6) == 0x3)
		if ((ant_div_ctl1 >> 0x6) == 0x3) {
			pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
			ath_info(common, "Enable LNA combining\n");
		}
	}

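The bit layout documented in the comment above packs the whole diversity configuration into one EEPROM byte; the capability is enabled only when both bit 6 (LNA diversity) and bit 7 (fast diversity) are set, i.e. (ant_div_ctl1 >> 6) == 0x3. A stand-alone sketch that decodes those fields; the struct and function names are illustrative, not driver API:

#include <stdbool.h>
#include <stdint.h>

struct ant_div_ctl1_fields {
	uint8_t alt_lnaconf;	/* bits 0-1 */
	uint8_t main_lnaconf;	/* bits 2-3 */
	uint8_t alt_gaintb;	/* bit 4 */
	uint8_t main_gaintb;	/* bit 5 */
	bool lnadiv_enabled;	/* bit 6 */
	bool fastdiv_enabled;	/* bit 7 */
};

static struct ant_div_ctl1_fields decode_ant_div_ctl1(uint8_t ctl1)
{
	struct ant_div_ctl1_fields f = {
		.alt_lnaconf	 = ctl1 & 0x3,
		.main_lnaconf	 = (ctl1 >> 2) & 0x3,
		.alt_gaintb	 = (ctl1 >> 4) & 0x1,
		.main_gaintb	 = (ctl1 >> 5) & 0x1,
		.lnadiv_enabled	 = (ctl1 >> 6) & 0x1,
		.fastdiv_enabled = (ctl1 >> 7) & 0x1,
	};

	return f;
}

/* Mirrors the capability test above: both enable bits must be set. */
static bool lna_combining_supported(uint8_t ctl1)
{
	return (ctl1 >> 6) == 0x3;
}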
if (ath9k_hw_dfs_tested(ah))
|
||||
|
@ -2610,6 +2613,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
|
|||
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
|
||||
|
||||
/*
|
||||
* Fast channel change across bands is available
|
||||
* only for AR9462 and AR9565.
|
||||
*/
|
||||
if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -247,6 +247,8 @@ enum ath9k_hw_caps {
	ATH9K_HW_CAP_DFS		= BIT(16),
	ATH9K_HW_WOW_DEVICE_CAPABLE	= BIT(17),
	ATH9K_HW_CAP_PAPRD		= BIT(18),
	ATH9K_HW_CAP_FCC_BAND_SWITCH	= BIT(19),
	ATH9K_HW_CAP_BT_ANT_DIV		= BIT(20),
};

/*
|
@ -310,6 +312,7 @@ struct ath9k_ops_config {
|
|||
|
||||
/* Platform specific config */
|
||||
u32 xlna_gpio;
|
||||
u32 ant_ctrl_comm2g_switch_enable;
|
||||
bool xatten_margin_cfg;
|
||||
};
|
||||
|
||||
|
@ -716,11 +719,14 @@ struct ath_hw_ops {
|
|||
struct ath_hw_antcomb_conf *antconf);
|
||||
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
|
||||
struct ath_hw_antcomb_conf *antconf);
|
||||
void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
|
||||
void (*spectral_scan_config)(struct ath_hw *ah,
|
||||
struct ath_spec_scan *param);
|
||||
void (*spectral_scan_trigger)(struct ath_hw *ah);
|
||||
void (*spectral_scan_wait)(struct ath_hw *ah);
|
||||
|
||||
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
|
||||
void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
|
||||
#endif
|
||||
};
|
||||
|
||||
struct ath_nf_limits {
|
||||
|
@ -765,7 +771,6 @@ struct ath_hw {
|
|||
bool aspm_enabled;
|
||||
bool is_monitoring;
|
||||
bool need_an_top2_fixup;
|
||||
bool shared_chain_lnadiv;
|
||||
u16 tx_trig_level;
|
||||
|
||||
u32 nf_regs[6];
|
||||
|
|
|
@@ -53,9 +53,9 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

static int ath9k_enable_diversity;
module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
|
@ -516,6 +516,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
|
|||
static void ath9k_init_platform(struct ath_softc *sc)
|
||||
{
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
|
||||
if (common->bus_ops->ath_bus_type != ATH_PCI)
|
||||
|
@ -525,12 +526,21 @@ static void ath9k_init_platform(struct ath_softc *sc)
|
|||
ATH9K_PCI_CUS230)) {
|
||||
ah->config.xlna_gpio = 9;
|
||||
ah->config.xatten_margin_cfg = true;
|
||||
ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
|
||||
sc->ant_comb.low_rssi_thresh = 20;
|
||||
sc->ant_comb.fast_div_bias = 3;
|
||||
|
||||
ath_info(common, "Set parameters for %s\n",
|
||||
(sc->driver_data & ATH9K_PCI_CUS198) ?
|
||||
"CUS198" : "CUS230");
|
||||
} else if (sc->driver_data & ATH9K_PCI_CUS217) {
|
||||
}
|
||||
|
||||
if (sc->driver_data & ATH9K_PCI_CUS217)
|
||||
ath_info(common, "CUS217 card detected\n");
|
||||
|
||||
if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
|
||||
ath_info(common, "Set BT/WLAN RX diversity capability\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -584,6 +594,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
|
|||
{
|
||||
struct ath9k_platform_data *pdata = sc->dev->platform_data;
|
||||
struct ath_hw *ah = NULL;
|
||||
struct ath9k_hw_capabilities *pCap;
|
||||
struct ath_common *common;
|
||||
int ret = 0, i;
|
||||
int csz = 0;
|
||||
|
@ -600,6 +611,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
|
|||
ah->reg_ops.rmw = ath9k_reg_rmw;
|
||||
atomic_set(&ah->intr_ref_cnt, -1);
|
||||
sc->sc_ah = ah;
|
||||
pCap = &ah->caps;
|
||||
|
||||
sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
|
||||
|
||||
|
@ -631,11 +643,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
|
|||
ath9k_init_platform(sc);
|
||||
|
||||
/*
|
||||
* Enable Antenna diversity only when BTCOEX is disabled
|
||||
* and the user manually requests the feature.
|
||||
* Enable WLAN/BT RX Antenna diversity only when:
|
||||
*
|
||||
* - BTCOEX is disabled.
|
||||
* - the user manually requests the feature.
|
||||
* - the HW cap is set using the platform data.
|
||||
*/
|
||||
if (!common->btcoex_enabled && ath9k_enable_diversity)
|
||||
common->antenna_diversity = 1;
|
||||
if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
|
||||
(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
|
||||
common->bt_ant_diversity = 1;
|
||||
|
||||
spin_lock_init(&common->cc_lock);
|
||||
|
||||
|
|
|
@ -238,9 +238,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
|
|||
ath_restart_work(sc);
|
||||
}
|
||||
|
||||
if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
|
||||
ath_ant_comb_update(sc);
|
||||
|
||||
ieee80211_wake_queues(sc->hw);
|
||||
|
||||
return true;
|
||||
|
@ -966,6 +963,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
|
|||
struct ath_softc *sc = hw->priv;
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ath_vif *avp = (void *)vif->drv_priv;
|
||||
struct ath_node *an = &avp->mcast_node;
|
||||
|
||||
mutex_lock(&sc->mutex);
|
||||
|
||||
|
@ -979,6 +978,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
|
|||
if (ath9k_uses_beacons(vif->type))
|
||||
ath9k_beacon_assign_slot(sc, vif);
|
||||
|
||||
an->sc = sc;
|
||||
an->sta = NULL;
|
||||
an->vif = vif;
|
||||
an->no_ps_filter = true;
|
||||
ath_tx_node_init(sc, an);
|
||||
|
||||
mutex_unlock(&sc->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1016,6 +1021,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
|
|||
{
|
||||
struct ath_softc *sc = hw->priv;
|
||||
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
|
||||
struct ath_vif *avp = (void *)vif->drv_priv;
|
||||
|
||||
ath_dbg(common, CONFIG, "Detach Interface\n");
|
||||
|
||||
|
@ -1030,6 +1036,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
|
|||
ath9k_calculate_summary_state(hw, NULL);
|
||||
ath9k_ps_restore(sc);
|
||||
|
||||
ath_tx_node_cleanup(sc, &avp->mcast_node);
|
||||
|
||||
mutex_unlock(&sc->mutex);
|
||||
}
|
||||
|
||||
|
@ -1374,9 +1382,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
|
|||
struct ath_softc *sc = hw->priv;
|
||||
struct ath_node *an = (struct ath_node *) sta->drv_priv;
|
||||
|
||||
if (!sta->ht_cap.ht_supported)
|
||||
return;
|
||||
|
||||
switch (cmd) {
|
||||
case STA_NOTIFY_SLEEP:
|
||||
an->sleeping = true;
|
||||
|
@ -2094,7 +2099,7 @@ static void ath9k_wow_add_pattern(struct ath_softc *sc,
|
|||
{
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath9k_wow_pattern *wow_pattern = NULL;
|
||||
struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns;
|
||||
struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
|
||||
int mask_len;
|
||||
s8 i = 0;
|
||||
|
||||
|
|
|
@@ -29,6 +29,14 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */

	/* AR9285 card for Asus */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
			 0x002B,
			 PCI_VENDOR_ID_AZWAVE,
			 0x2C37),
	  .driver_data = ATH9K_PCI_BT_ANT_DIV },

	{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
	{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
	{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI   */
|
@ -40,29 +48,101 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
|
|||
0x0032,
|
||||
PCI_VENDOR_ID_AZWAVE,
|
||||
0x2086),
|
||||
.driver_data = ATH9K_PCI_CUS198 },
|
||||
.driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_AZWAVE,
|
||||
0x1237),
|
||||
.driver_data = ATH9K_PCI_CUS198 },
|
||||
.driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_AZWAVE,
|
||||
0x2126),
|
||||
.driver_data = ATH9K_PCI_CUS198 },
|
||||
.driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
|
||||
|
||||
/* PCI-E CUS230 */
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_AZWAVE,
|
||||
0x2152),
|
||||
.driver_data = ATH9K_PCI_CUS230 },
|
||||
.driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_FOXCONN,
|
||||
0xE075),
|
||||
.driver_data = ATH9K_PCI_CUS230 },
|
||||
.driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
|
||||
|
||||
/* WB225 */
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_ATHEROS,
|
||||
0x3119),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_ATHEROS,
|
||||
0x3122),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
0x185F, /* WNC */
|
||||
0x3119),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
0x185F, /* WNC */
|
||||
0x3027),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0x4105),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0x4106),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0x410D),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0x410E),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0x410F),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0xC706),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0xC680),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_SAMSUNG,
|
||||
0xC708),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_LENOVO,
|
||||
0x3218),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0032,
|
||||
PCI_VENDOR_ID_LENOVO,
|
||||
0x3219),
|
||||
.driver_data = ATH9K_PCI_BT_ANT_DIV },
|
||||
|
||||
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
|
||||
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
|
||||
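The new PCI_DEVICE_SUB() entries match on subsystem vendor/device IDs, so specific 0x0032 (AR9485 family) cards pick up ATH9K_PCI_BT_ANT_DIV, and the CUS198/CUS230 boards now carry both flags, through the table's driver_data. A sketch of how such per-card flags are normally consumed at probe time; this is illustrative only, not the driver's actual probe function, and the flag value is a stand-in:

#include <linux/pci.h>

#define EXAMPLE_BT_ANT_DIV (1 << 3)	/* stand-in for ATH9K_PCI_BT_ANT_DIV */

/* Sketch only: read the per-card flags from the matched table entry. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long flags = id->driver_data;

	if (flags & EXAMPLE_BT_ANT_DIV) {
		/* this card can switch the shared WLAN/BT RX antenna */
	}
	return 0;
}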
@ -48,4 +48,11 @@
|
|||
#define AR_PHY_PLL_CONTROL 0x16180
|
||||
#define AR_PHY_PLL_MODE 0x16184
|
||||
|
||||
enum ath9k_ant_div_comb_lna_conf {
|
||||
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
|
||||
ATH_ANT_DIV_COMB_LNA2,
|
||||
ATH_ANT_DIV_COMB_LNA1,
|
||||
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1275,6 +1275,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
|
|||
}
|
||||
|
||||
static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
|
||||
struct cfg80211_chan_def *chandef,
|
||||
struct ieee80211_sta *sta, void *priv_sta)
|
||||
{
|
||||
struct ath_softc *sc = priv;
|
||||
|
@ -1313,6 +1314,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
|
|||
}
|
||||
|
||||
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
|
||||
struct cfg80211_chan_def *chandef,
|
||||
struct ieee80211_sta *sta, void *priv_sta,
|
||||
u32 changed)
|
||||
{
|
||||
|
|
|
@ -1157,6 +1157,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
|
|||
struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
|
||||
struct ieee80211_rx_status *rxs;
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath9k_hw_capabilities *pCap = &ah->caps;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ieee80211_hw *hw = sc->hw;
|
||||
struct ieee80211_hdr *hdr;
|
||||
|
@ -1328,22 +1329,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
|
|||
skb = hdr_skb;
|
||||
}
|
||||
|
||||
|
||||
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
|
||||
|
||||
/*
|
||||
* change the default rx antenna if rx diversity
|
||||
* chooses the other antenna 3 times in a row.
|
||||
*/
|
||||
if (sc->rx.defant != rs.rs_antenna) {
|
||||
if (++sc->rx.rxotherant >= 3)
|
||||
ath_setdefantenna(sc, rs.rs_antenna);
|
||||
} else {
|
||||
sc->rx.rxotherant = 0;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
|
||||
skb_trim(skb, skb->len - 8);
|
||||
|
||||
|
@ -1355,8 +1340,35 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
|
|||
ath_rx_ps(sc, skb, rs.is_mybeacon);
|
||||
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
|
||||
|
||||
if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
|
||||
ath_ant_comb_scan(sc, &rs);
|
||||
/*
|
||||
* Run the LNA combining algorithm only in these cases:
|
||||
*
|
||||
* Standalone WLAN cards with both LNA/Antenna diversity
|
||||
* enabled in the EEPROM.
|
||||
*
|
||||
* WLAN+BT cards which are in the supported card list
|
||||
* in ath_pci_id_table and the user has loaded the
|
||||
* driver with "bt_ant_diversity" set to true.
|
||||
*/
|
||||
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
|
||||
/*
|
||||
* Change the default rx antenna if rx diversity
|
||||
* chooses the other antenna 3 times in a row.
|
||||
*/
|
||||
if (sc->rx.defant != rs.rs_antenna) {
|
||||
if (++sc->rx.rxotherant >= 3)
|
||||
ath_setdefantenna(sc, rs.rs_antenna);
|
||||
} else {
|
||||
sc->rx.rxotherant = 0;
|
||||
}
|
||||
|
||||
if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
|
||||
if (common->bt_ant_diversity)
|
||||
ath_ant_comb_scan(sc, &rs);
|
||||
} else {
|
||||
ath_ant_comb_scan(sc, &rs);
|
||||
}
|
||||
}
|
||||
|
||||
ath9k_apply_ampdu_details(sc, &rs, rxs);
|
||||
|
||||
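The receive path keeps the "switch the default antenna after three misses" logic whenever ATH9K_HW_CAP_ANT_DIV_COMB is present, but the LNA combining scan is now gated: it always runs for standalone diversity cards, and only runs for WLAN+BT cards (ATH9K_HW_CAP_BT_ANT_DIV) when the user asked for bt_ant_diversity. A standalone restatement of that gate, with the capability bits reduced to plain booleans:

#include <stdbool.h>

/* Sketch: should the LNA combining scan run for this received frame? */
static bool run_lna_comb_scan(bool ant_div_comb_cap, bool bt_ant_div_cap,
			      bool bt_ant_diversity_requested)
{
	if (!ant_div_comb_cap)
		return false;			/* no diversity hardware at all */
	if (bt_ant_div_cap)
		return bt_ant_diversity_requested;	/* WLAN+BT card */
	return true;				/* standalone diversity card */
}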
@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
|
|||
|
||||
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
|
||||
{
|
||||
if (!tid->an->sta)
|
||||
return;
|
||||
|
||||
ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
|
||||
seqno << IEEE80211_SEQ_SEQ_SHIFT);
|
||||
}
|
||||
|
@ -168,6 +171,71 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
|
|||
}
|
||||
}
|
||||
|
||||
static struct ath_atx_tid *
|
||||
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_hdr *hdr;
|
||||
u8 tidno = 0;
|
||||
|
||||
hdr = (struct ieee80211_hdr *) skb->data;
|
||||
if (ieee80211_is_data_qos(hdr->frame_control))
|
||||
tidno = ieee80211_get_qos_ctl(hdr)[0];
|
||||
|
||||
tidno &= IEEE80211_QOS_CTL_TID_MASK;
|
||||
return ATH_AN_2_TID(an, tidno);
|
||||
}
|
||||
|
||||
static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
|
||||
{
|
||||
return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
|
||||
}
|
||||
|
||||
static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = __skb_dequeue(&tid->retry_q);
|
||||
if (!skb)
|
||||
skb = __skb_dequeue(&tid->buf_q);
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
/*
|
||||
* ath_tx_tid_change_state:
|
||||
* - clears a-mpdu flag of previous session
|
||||
* - forces sequence number allocation to fix the next BlockAck Window
|
||||
*/
|
||||
static void
|
||||
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
|
||||
{
|
||||
struct ath_txq *txq = tid->ac->txq;
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
struct sk_buff *skb, *tskb;
|
||||
struct ath_buf *bf;
|
||||
struct ath_frame_info *fi;
|
||||
|
||||
skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
|
||||
fi = get_frame_info(skb);
|
||||
bf = fi->bf;
|
||||
|
||||
tx_info = IEEE80211_SKB_CB(skb);
|
||||
tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
||||
|
||||
if (bf)
|
||||
continue;
|
||||
|
||||
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
|
||||
if (!bf) {
|
||||
__skb_unlink(skb, &tid->buf_q);
|
||||
ath_txq_skb_done(sc, txq, skb);
|
||||
ieee80211_free_txskb(sc->hw, skb);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
|
||||
{
|
||||
struct ath_txq *txq = tid->ac->txq;
|
||||
|
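Each TID now keeps two software queues: retry_q for frames that were already handed a descriptor (or bounced back from a failed aggregate) and buf_q for fresh traffic. ath_tid_dequeue() drains retry_q first so the BlockAck window can keep moving, ath_tid_has_buffered() replaces the scattered skb_queue_empty(&tid->buf_q) checks, and ath_get_skb_tid() centralizes TID extraction from the QoS control field. A tiny standalone restatement of that extraction; the 0x0f mask mirrors IEEE80211_QOS_CTL_TID_MASK:

/* Sketch: TID number from the first QoS-control byte; 0 for non-QoS data. */
static unsigned int skb_tid(const unsigned char *qos_ctl, int is_qos_data)
{
	return is_qos_data ? (qos_ctl[0] & 0x0f) : 0;
}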
@ -182,28 +250,22 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
|
|||
|
||||
memset(&ts, 0, sizeof(ts));
|
||||
|
||||
while ((skb = __skb_dequeue(&tid->buf_q))) {
|
||||
while ((skb = __skb_dequeue(&tid->retry_q))) {
|
||||
fi = get_frame_info(skb);
|
||||
bf = fi->bf;
|
||||
|
||||
if (!bf) {
|
||||
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
|
||||
if (!bf) {
|
||||
ath_txq_skb_done(sc, txq, skb);
|
||||
ieee80211_free_txskb(sc->hw, skb);
|
||||
continue;
|
||||
}
|
||||
ath_txq_skb_done(sc, txq, skb);
|
||||
ieee80211_free_txskb(sc->hw, skb);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (fi->retries) {
|
||||
list_add_tail(&bf->list, &bf_head);
|
||||
if (fi->baw_tracked) {
|
||||
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
|
||||
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
|
||||
sendbar = true;
|
||||
} else {
|
||||
ath_set_rates(tid->an->vif, tid->an->sta, bf);
|
||||
ath_tx_send_normal(sc, txq, NULL, skb);
|
||||
}
|
||||
|
||||
list_add_tail(&bf->list, &bf_head);
|
||||
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
|
||||
}
|
||||
|
||||
if (sendbar) {
|
||||
|
@ -232,13 +294,16 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|||
}
|
||||
|
||||
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
||||
u16 seqno)
|
||||
struct ath_buf *bf)
|
||||
{
|
||||
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
|
||||
u16 seqno = bf->bf_state.seqno;
|
||||
int index, cindex;
|
||||
|
||||
index = ATH_BA_INDEX(tid->seq_start, seqno);
|
||||
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
|
||||
__set_bit(cindex, tid->tx_buf);
|
||||
fi->baw_tracked = 1;
|
||||
|
||||
if (index >= ((tid->baw_tail - tid->baw_head) &
|
||||
(ATH_TID_MAX_BUFS - 1))) {
|
||||
|
@ -266,7 +331,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
|
|||
memset(&ts, 0, sizeof(ts));
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
|
||||
while ((skb = __skb_dequeue(&tid->buf_q))) {
|
||||
while ((skb = ath_tid_dequeue(tid))) {
|
||||
fi = get_frame_info(skb);
|
||||
bf = fi->bf;
|
||||
|
||||
|
@ -403,7 +468,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
struct ieee80211_tx_rate rates[4];
|
||||
struct ath_frame_info *fi;
|
||||
int nframes;
|
||||
u8 tidno;
|
||||
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
|
||||
int i, retries;
|
||||
int bar_index = -1;
|
||||
|
@ -440,8 +504,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
}
|
||||
|
||||
an = (struct ath_node *)sta->drv_priv;
|
||||
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
|
||||
tid = ATH_AN_2_TID(an, tidno);
|
||||
tid = ath_get_skb_tid(sc, an, skb);
|
||||
seq_first = tid->seq_start;
|
||||
isba = ts->ts_flags & ATH9K_TX_BA;
|
||||
|
||||
|
@ -453,7 +516,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
* Only BlockAcks have a TID and therefore normal Acks cannot be
|
||||
* checked
|
||||
*/
|
||||
if (isba && tidno != ts->tid)
|
||||
if (isba && tid->tidno != ts->tid)
|
||||
txok = false;
|
||||
|
||||
isaggr = bf_isaggr(bf);
|
||||
|
@ -489,7 +552,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
tx_info = IEEE80211_SKB_CB(skb);
|
||||
fi = get_frame_info(skb);
|
||||
|
||||
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
|
||||
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
|
||||
!tid->active) {
|
||||
/*
|
||||
* Outside of the current BlockAck window,
|
||||
* maybe part of a previous session
|
||||
|
@ -583,7 +647,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|||
if (an->sleeping)
|
||||
ieee80211_sta_set_buffered(sta, tid->tidno, true);
|
||||
|
||||
skb_queue_splice(&bf_pending, &tid->buf_q);
|
||||
skb_queue_splice_tail(&bf_pending, &tid->retry_q);
|
||||
if (!an->sleeping) {
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
|
||||
|
@ -641,7 +705,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
|
|||
} else
|
||||
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
|
||||
|
||||
if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
|
||||
if (!flush)
|
||||
ath_txq_schedule(sc, txq);
|
||||
}
|
||||
|
||||
|
@ -815,15 +879,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|||
|
||||
static struct ath_buf *
|
||||
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid)
|
||||
struct ath_atx_tid *tid, struct sk_buff_head **q)
|
||||
{
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
struct ath_frame_info *fi;
|
||||
struct sk_buff *skb;
|
||||
struct ath_buf *bf;
|
||||
u16 seqno;
|
||||
|
||||
while (1) {
|
||||
skb = skb_peek(&tid->buf_q);
|
||||
*q = &tid->retry_q;
|
||||
if (skb_queue_empty(*q))
|
||||
*q = &tid->buf_q;
|
||||
|
||||
skb = skb_peek(*q);
|
||||
if (!skb)
|
||||
break;
|
||||
|
||||
|
@ -833,12 +902,22 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
|
|||
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
|
||||
|
||||
if (!bf) {
|
||||
__skb_unlink(skb, &tid->buf_q);
|
||||
__skb_unlink(skb, *q);
|
||||
ath_txq_skb_done(sc, txq, skb);
|
||||
ieee80211_free_txskb(sc->hw, skb);
|
||||
continue;
|
||||
}
|
||||
|
||||
bf->bf_next = NULL;
|
||||
bf->bf_lastbf = bf;
|
||||
|
||||
tx_info = IEEE80211_SKB_CB(skb);
|
||||
tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
||||
if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
|
||||
bf->bf_state.bf_type = 0;
|
||||
return bf;
|
||||
}
|
||||
|
||||
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
|
||||
seqno = bf->bf_state.seqno;
|
||||
|
||||
|
@ -852,73 +931,52 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
|
|||
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
list_add(&bf->list, &bf_head);
|
||||
__skb_unlink(skb, &tid->buf_q);
|
||||
__skb_unlink(skb, *q);
|
||||
ath_tx_update_baw(sc, tid, seqno);
|
||||
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
bf->bf_next = NULL;
|
||||
bf->bf_lastbf = bf;
|
||||
return bf;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
|
||||
struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid,
|
||||
struct list_head *bf_q,
|
||||
int *aggr_len)
|
||||
static bool
|
||||
ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid, struct list_head *bf_q,
|
||||
struct ath_buf *bf_first, struct sk_buff_head *tid_q,
|
||||
int *aggr_len)
|
||||
{
|
||||
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
|
||||
struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
|
||||
int rl = 0, nframes = 0, ndelim, prev_al = 0;
|
||||
struct ath_buf *bf = bf_first, *bf_prev = NULL;
|
||||
int nframes = 0, ndelim;
|
||||
u16 aggr_limit = 0, al = 0, bpad = 0,
|
||||
al_delta, h_baw = tid->baw_size / 2;
|
||||
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
|
||||
al_delta, h_baw = tid->baw_size / 2;
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
struct ath_frame_info *fi;
|
||||
struct sk_buff *skb;
|
||||
bool closed = false;
|
||||
|
||||
bf = bf_first;
|
||||
aggr_limit = ath_lookup_rate(sc, bf, tid);
|
||||
|
||||
do {
|
||||
bf = ath_tx_get_tid_subframe(sc, txq, tid);
|
||||
if (!bf) {
|
||||
status = ATH_AGGR_BAW_CLOSED;
|
||||
break;
|
||||
}
|
||||
|
||||
skb = bf->bf_mpdu;
|
||||
fi = get_frame_info(skb);
|
||||
|
||||
if (!bf_first)
|
||||
bf_first = bf;
|
||||
|
||||
if (!rl) {
|
||||
ath_set_rates(tid->an->vif, tid->an->sta, bf);
|
||||
aggr_limit = ath_lookup_rate(sc, bf, tid);
|
||||
rl = 1;
|
||||
}
|
||||
|
||||
/* do not exceed aggregation limit */
|
||||
al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
|
||||
if (nframes) {
|
||||
if (aggr_limit < al + bpad + al_delta ||
|
||||
ath_lookup_legacy(bf) || nframes >= h_baw)
|
||||
break;
|
||||
|
||||
if (nframes &&
|
||||
((aggr_limit < (al + bpad + al_delta + prev_al)) ||
|
||||
ath_lookup_legacy(bf))) {
|
||||
status = ATH_AGGR_LIMITED;
|
||||
break;
|
||||
}
|
||||
|
||||
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
|
||||
if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
|
||||
break;
|
||||
|
||||
/* do not exceed subframe limit */
|
||||
if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
|
||||
status = ATH_AGGR_LIMITED;
|
||||
break;
|
||||
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
|
||||
if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
|
||||
!(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
|
||||
break;
|
||||
}
|
||||
|
||||
/* add padding for previous frame to aggregation length */
|
||||
|
@ -936,22 +994,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
|
|||
bf->bf_next = NULL;
|
||||
|
||||
/* link buffers of this frame to the aggregate */
|
||||
if (!fi->retries)
|
||||
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
|
||||
if (!fi->baw_tracked)
|
||||
ath_tx_addto_baw(sc, tid, bf);
|
||||
bf->bf_state.ndelim = ndelim;
|
||||
|
||||
__skb_unlink(skb, &tid->buf_q);
|
||||
__skb_unlink(skb, tid_q);
|
||||
list_add_tail(&bf->list, bf_q);
|
||||
if (bf_prev)
|
||||
bf_prev->bf_next = bf;
|
||||
|
||||
bf_prev = bf;
|
||||
|
||||
} while (!skb_queue_empty(&tid->buf_q));
|
||||
bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
|
||||
if (!bf) {
|
||||
closed = true;
|
||||
break;
|
||||
}
|
||||
} while (ath_tid_has_buffered(tid));
|
||||
|
||||
bf = bf_first;
|
||||
bf->bf_lastbf = bf_prev;
|
||||
|
||||
if (bf == bf_prev) {
|
||||
al = get_frame_info(bf->bf_mpdu)->framelen;
|
||||
bf->bf_state.bf_type = BUF_AMPDU;
|
||||
} else {
|
||||
TX_STAT_INC(txq->axq_qnum, a_aggr);
|
||||
}
|
||||
|
||||
*aggr_len = al;
|
||||
|
||||
return status;
|
||||
return closed;
|
||||
#undef PADBYTES
|
||||
}
|
||||
|
||||
|
@ -1023,7 +1096,7 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
|
|||
}
|
||||
|
||||
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
|
||||
struct ath_tx_info *info, int len)
|
||||
struct ath_tx_info *info, int len, bool rts)
|
||||
{
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct sk_buff *skb;
|
||||
|
@ -1032,6 +1105,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
|
|||
const struct ieee80211_rate *rate;
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
|
||||
u32 rts_thresh = sc->hw->wiphy->rts_threshold;
|
||||
int i;
|
||||
u8 rix = 0;
|
||||
|
||||
|
@ -1054,7 +1128,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
|
|||
rix = rates[i].idx;
|
||||
info->rates[i].Tries = rates[i].count;
|
||||
|
||||
if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
|
||||
/*
|
||||
* Handle RTS threshold for unaggregated HT frames.
|
||||
*/
|
||||
if (bf_isampdu(bf) && !bf_isaggr(bf) &&
|
||||
(rates[i].flags & IEEE80211_TX_RC_MCS) &&
|
||||
unlikely(rts_thresh != (u32) -1)) {
|
||||
if (!rts_thresh || (len > rts_thresh))
|
||||
rts = true;
|
||||
}
|
||||
|
||||
if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
|
||||
info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
|
||||
info->flags |= ATH9K_TXDESC_RTSENA;
|
||||
} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
|
||||
|
@ -1147,6 +1231,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
|
|||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath_buf *bf_first = NULL;
|
||||
struct ath_tx_info info;
|
||||
u32 rts_thresh = sc->hw->wiphy->rts_threshold;
|
||||
bool rts = false;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.is_first = true;
|
||||
|
@ -1183,7 +1269,22 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
|
|||
info.flags |= (u32) bf->bf_state.bfs_paprd <<
|
||||
ATH9K_TXDESC_PAPRD_S;
|
||||
|
||||
ath_buf_set_rate(sc, bf, &info, len);
|
||||
/*
|
||||
* mac80211 doesn't handle RTS threshold for HT because
|
||||
* the decision has to be taken based on AMPDU length
|
||||
* and aggregation is done entirely inside ath9k.
|
||||
* Set the RTS/CTS flag for the first subframe based
|
||||
* on the threshold.
|
||||
*/
|
||||
if (aggr && (bf == bf_first) &&
|
||||
unlikely(rts_thresh != (u32) -1)) {
|
||||
/*
|
||||
* "len" is the size of the entire AMPDU.
|
||||
*/
|
||||
if (!rts_thresh || (len > rts_thresh))
|
||||
rts = true;
|
||||
}
|
||||
ath_buf_set_rate(sc, bf, &info, len, rts);
|
||||
}
|
||||
|
||||
info.buf_addr[0] = bf->bf_buf_addr;
|
||||
|
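RTS handling moves into ath_buf_set_rate(), which now takes an explicit rts flag: ath_tx_fill_desc() pre-computes it for the first subframe of an aggregate using the whole A-MPDU length (mac80211 leaves the HT RTS-threshold decision to the driver), while ath_buf_set_rate() still applies the threshold to unaggregated HT frames. The decision itself, restated as a standalone helper; (u32)-1 is the "threshold disabled" sentinel used by mac80211:

/* Sketch: does a frame or aggregate of 'len' bytes need RTS/CTS protection? */
static int needs_rts(unsigned int len, unsigned int rts_thresh)
{
	if (rts_thresh == (unsigned int)-1)	/* RTS threshold disabled */
		return 0;
	return !rts_thresh || len > rts_thresh;
}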
@ -1212,53 +1313,86 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
|
|||
}
|
||||
}
|
||||
|
||||
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid)
|
||||
static void
|
||||
ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid, struct list_head *bf_q,
|
||||
struct ath_buf *bf_first, struct sk_buff_head *tid_q)
|
||||
{
|
||||
struct ath_buf *bf;
|
||||
enum ATH_AGGR_STATUS status;
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
struct list_head bf_q;
|
||||
int aggr_len;
|
||||
struct ath_buf *bf = bf_first, *bf_prev = NULL;
|
||||
struct sk_buff *skb;
|
||||
int nframes = 0;
|
||||
|
||||
do {
|
||||
if (skb_queue_empty(&tid->buf_q))
|
||||
return;
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
skb = bf->bf_mpdu;
|
||||
|
||||
INIT_LIST_HEAD(&bf_q);
|
||||
nframes++;
|
||||
__skb_unlink(skb, tid_q);
|
||||
list_add_tail(&bf->list, bf_q);
|
||||
if (bf_prev)
|
||||
bf_prev->bf_next = bf;
|
||||
bf_prev = bf;
|
||||
|
||||
status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
|
||||
|
||||
/*
|
||||
* no frames picked up to be aggregated;
|
||||
* block-ack window is not open.
|
||||
*/
|
||||
if (list_empty(&bf_q))
|
||||
if (nframes >= 2)
|
||||
break;
|
||||
|
||||
bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
|
||||
if (!bf)
|
||||
break;
|
||||
|
||||
bf = list_first_entry(&bf_q, struct ath_buf, list);
|
||||
bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
|
||||
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
|
||||
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
|
||||
break;
|
||||
|
||||
if (tid->ac->clear_ps_filter) {
|
||||
tid->ac->clear_ps_filter = false;
|
||||
tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
||||
} else {
|
||||
tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
||||
}
|
||||
ath_set_rates(tid->an->vif, tid->an->sta, bf);
|
||||
} while (1);
|
||||
}
|
||||
|
||||
/* if only one frame, send as non-aggregate */
|
||||
if (bf == bf->bf_lastbf) {
|
||||
aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
|
||||
bf->bf_state.bf_type = BUF_AMPDU;
|
||||
} else {
|
||||
TX_STAT_INC(txq->axq_qnum, a_aggr);
|
||||
}
|
||||
static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid, bool *stop)
|
||||
{
|
||||
struct ath_buf *bf;
|
||||
struct ieee80211_tx_info *tx_info;
|
||||
struct sk_buff_head *tid_q;
|
||||
struct list_head bf_q;
|
||||
int aggr_len = 0;
|
||||
bool aggr, last = true;
|
||||
|
||||
ath_tx_fill_desc(sc, bf, txq, aggr_len);
|
||||
ath_tx_txqaddbuf(sc, txq, &bf_q, false);
|
||||
} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
|
||||
status != ATH_AGGR_BAW_CLOSED);
|
||||
if (!ath_tid_has_buffered(tid))
|
||||
return false;
|
||||
|
||||
INIT_LIST_HEAD(&bf_q);
|
||||
|
||||
bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
|
||||
if (!bf)
|
||||
return false;
|
||||
|
||||
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
|
||||
aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
|
||||
if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
|
||||
(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
|
||||
*stop = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
ath_set_rates(tid->an->vif, tid->an->sta, bf);
|
||||
if (aggr)
|
||||
last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
|
||||
tid_q, &aggr_len);
|
||||
else
|
||||
ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
|
||||
|
||||
if (list_empty(&bf_q))
|
||||
return false;
|
||||
|
||||
if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
|
||||
tid->ac->clear_ps_filter = false;
|
||||
tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
||||
}
|
||||
|
||||
ath_tx_fill_desc(sc, bf, txq, aggr_len);
|
||||
ath_tx_txqaddbuf(sc, txq, &bf_q, false);
|
||||
return true;
|
||||
}
|
||||
|
||||
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
|
||||
|
@ -1282,6 +1416,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
|
|||
an->mpdudensity = density;
|
||||
}
|
||||
|
||||
/* force sequence number allocation for pending frames */
|
||||
ath_tx_tid_change_state(sc, txtid);
|
||||
|
||||
txtid->active = true;
|
||||
txtid->paused = true;
|
||||
*ssn = txtid->seq_start = txtid->seq_next;
|
||||
|
@ -1301,8 +1438,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
|
|||
|
||||
ath_txq_lock(sc, txq);
|
||||
txtid->active = false;
|
||||
txtid->paused = true;
|
||||
txtid->paused = false;
|
||||
ath_tx_flush_tid(sc, txtid);
|
||||
ath_tx_tid_change_state(sc, txtid);
|
||||
ath_txq_unlock_complete(sc, txq);
|
||||
}
|
||||
|
||||
|
@ -1326,7 +1464,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
|
|||
|
||||
ath_txq_lock(sc, txq);
|
||||
|
||||
buffered = !skb_queue_empty(&tid->buf_q);
|
||||
buffered = ath_tid_has_buffered(tid);
|
||||
|
||||
tid->sched = false;
|
||||
list_del(&tid->list);
|
||||
|
@ -1358,7 +1496,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
|
|||
ath_txq_lock(sc, txq);
|
||||
ac->clear_ps_filter = true;
|
||||
|
||||
if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
|
||||
if (!tid->paused && ath_tid_has_buffered(tid)) {
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
ath_txq_schedule(sc, txq);
|
||||
}
|
||||
|
@ -1383,7 +1521,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
|
|||
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
|
||||
tid->paused = false;
|
||||
|
||||
if (!skb_queue_empty(&tid->buf_q)) {
|
||||
if (ath_tid_has_buffered(tid)) {
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
ath_txq_schedule(sc, txq);
|
||||
}
|
||||
|
@ -1403,6 +1541,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
|
|||
struct ieee80211_tx_info *info;
|
||||
struct list_head bf_q;
|
||||
struct ath_buf *bf_tail = NULL, *bf;
|
||||
struct sk_buff_head *tid_q;
|
||||
int sent = 0;
|
||||
int i;
|
||||
|
||||
|
@ -1418,15 +1557,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
|
|||
continue;
|
||||
|
||||
ath_txq_lock(sc, tid->ac->txq);
|
||||
while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
|
||||
bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
|
||||
while (nframes > 0) {
|
||||
bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
|
||||
if (!bf)
|
||||
break;
|
||||
|
||||
__skb_unlink(bf->bf_mpdu, &tid->buf_q);
|
||||
__skb_unlink(bf->bf_mpdu, tid_q);
|
||||
list_add_tail(&bf->list, &bf_q);
|
||||
ath_set_rates(tid->an->vif, tid->an->sta, bf);
|
||||
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
|
||||
ath_tx_addto_baw(sc, tid, bf);
|
||||
bf->bf_state.bf_type &= ~BUF_AGGR;
|
||||
if (bf_tail)
|
||||
bf_tail->bf_next = bf;
|
||||
|
@ -1436,7 +1575,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
|
|||
sent++;
|
||||
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
|
||||
|
||||
if (skb_queue_empty(&tid->buf_q))
|
||||
if (an->sta && !ath_tid_has_buffered(tid))
|
||||
ieee80211_sta_set_buffered(an->sta, i, false);
|
||||
}
|
||||
ath_txq_unlock_complete(sc, tid->ac->txq);
|
||||
|
@ -1689,25 +1828,27 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
|
|||
*/
|
||||
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
|
||||
{
|
||||
struct ath_atx_ac *ac, *ac_tmp, *last_ac;
|
||||
struct ath_atx_ac *ac, *last_ac;
|
||||
struct ath_atx_tid *tid, *last_tid;
|
||||
bool sent = false;
|
||||
|
||||
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
|
||||
list_empty(&txq->axq_acq) ||
|
||||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
|
||||
list_empty(&txq->axq_acq))
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
|
||||
last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
|
||||
while (!list_empty(&txq->axq_acq)) {
|
||||
bool stop = false;
|
||||
|
||||
list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
|
||||
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
|
||||
last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
|
||||
list_del(&ac->list);
|
||||
ac->sched = false;
|
||||
|
||||
while (!list_empty(&ac->tid_q)) {
|
||||
|
||||
tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
|
||||
list);
|
||||
list_del(&tid->list);
|
||||
|
@ -1716,17 +1857,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
|
|||
if (tid->paused)
|
||||
continue;
|
||||
|
||||
ath_tx_sched_aggr(sc, txq, tid);
|
||||
if (ath_tx_sched_aggr(sc, txq, tid, &stop))
|
||||
sent = true;
|
||||
|
||||
/*
|
||||
* add tid to round-robin queue if more frames
|
||||
* are pending for the tid
|
||||
*/
|
||||
if (!skb_queue_empty(&tid->buf_q))
|
||||
if (ath_tid_has_buffered(tid))
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
|
||||
if (tid == last_tid ||
|
||||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
|
||||
if (stop || tid == last_tid)
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1735,9 +1876,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
|
|||
list_add_tail(&ac->list, &txq->axq_acq);
|
||||
}
|
||||
|
||||
if (ac == last_ac ||
|
||||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
|
||||
if (stop)
|
||||
break;
|
||||
|
||||
if (ac == last_ac) {
|
||||
if (!sent)
|
||||
break;
|
||||
|
||||
sent = false;
|
||||
last_ac = list_entry(txq->axq_acq.prev,
|
||||
struct ath_atx_ac, list);
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
@ -1816,58 +1965,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
|
|||
}
|
||||
}
|
||||
|
||||
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid, struct sk_buff *skb,
|
||||
struct ath_tx_control *txctl)
|
||||
{
|
||||
struct ath_frame_info *fi = get_frame_info(skb);
|
||||
struct list_head bf_head;
|
||||
struct ath_buf *bf;
|
||||
|
||||
/*
|
||||
* Do not queue to h/w when any of the following conditions is true:
|
||||
* - there are pending frames in software queue
|
||||
* - the TID is currently paused for ADDBA/BAR request
|
||||
* - seqno is not within block-ack window
|
||||
* - h/w queue depth exceeds low water mark
|
||||
*/
|
||||
if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
|
||||
!BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
|
||||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
|
||||
txq != sc->tx.uapsdq) {
|
||||
/*
|
||||
* Add this frame to software queue for scheduling later
|
||||
* for aggregation.
|
||||
*/
|
||||
TX_STAT_INC(txq->axq_qnum, a_queued_sw);
|
||||
__skb_queue_tail(&tid->buf_q, skb);
|
||||
if (!txctl->an || !txctl->an->sleeping)
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
return;
|
||||
}
|
||||
|
||||
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
|
||||
if (!bf) {
|
||||
ath_txq_skb_done(sc, txq, skb);
|
||||
ieee80211_free_txskb(sc->hw, skb);
|
||||
return;
|
||||
}
|
||||
|
||||
ath_set_rates(tid->an->vif, tid->an->sta, bf);
|
||||
bf->bf_state.bf_type = BUF_AMPDU;
|
||||
INIT_LIST_HEAD(&bf_head);
|
||||
list_add(&bf->list, &bf_head);
|
||||
|
||||
/* Add sub-frame to BAW */
|
||||
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
|
||||
|
||||
/* Queue to h/w without aggregation */
|
||||
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
|
||||
bf->bf_lastbf = bf;
|
||||
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
|
||||
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
|
||||
}
|
||||
|
||||
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
|
||||
struct ath_atx_tid *tid, struct sk_buff *skb)
|
||||
{
|
||||
|
@ -2010,6 +2107,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_sta *sta = txctl->sta;
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct ath_vif *avp;
|
||||
struct ath_softc *sc = hw->priv;
|
||||
int frmlen = skb->len + FCS_LEN;
|
||||
int padpos, padsize;
|
||||
|
@ -2017,6 +2115,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
/* NOTE: sta can be NULL according to net/mac80211.h */
|
||||
if (sta)
|
||||
txctl->an = (struct ath_node *)sta->drv_priv;
|
||||
else if (vif && ieee80211_is_data(hdr->frame_control)) {
|
||||
avp = (void *)vif->drv_priv;
|
||||
txctl->an = &avp->mcast_node;
|
||||
}
|
||||
|
||||
if (info->control.hw_key)
|
||||
frmlen += info->control.hw_key->icv_len;
|
||||
|
@ -2066,7 +2168,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
struct ath_txq *txq = txctl->txq;
|
||||
struct ath_atx_tid *tid = NULL;
|
||||
struct ath_buf *bf;
|
||||
u8 tidno;
|
||||
int q;
|
||||
int ret;
|
||||
|
||||
|
@ -2094,22 +2195,25 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
|
|||
ath_txq_unlock(sc, txq);
|
||||
txq = sc->tx.uapsdq;
|
||||
ath_txq_lock(sc, txq);
|
||||
}
|
||||
|
||||
if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
|
||||
tidno = ieee80211_get_qos_ctl(hdr)[0] &
|
||||
IEEE80211_QOS_CTL_TID_MASK;
|
||||
tid = ATH_AN_2_TID(txctl->an, tidno);
|
||||
} else if (txctl->an &&
|
||||
ieee80211_is_data_present(hdr->frame_control)) {
|
||||
tid = ath_get_skb_tid(sc, txctl->an, skb);
|
||||
|
||||
WARN_ON(tid->ac->txq != txctl->txq);
|
||||
}
|
||||
|
||||
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
|
||||
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
|
||||
tid->ac->clear_ps_filter = true;
|
||||
|
||||
/*
|
||||
* Try aggregation if it's a unicast data frame
|
||||
* and the destination is HT capable.
|
||||
* Add this frame to software queue for scheduling later
|
||||
* for aggregation.
|
||||
*/
|
||||
ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
|
||||
TX_STAT_INC(txq->axq_qnum, a_queued_sw);
|
||||
__skb_queue_tail(&tid->buf_q, skb);
|
||||
if (!txctl->an->sleeping)
|
||||
ath_tx_queue_tid(txq, tid);
|
||||
|
||||
ath_txq_schedule(sc, txq);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -2168,7 +2272,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
|
||||
bf->bf_lastbf = bf;
|
||||
ath_set_rates(vif, NULL, bf);
|
||||
ath_buf_set_rate(sc, bf, &info, fi->framelen);
|
||||
ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
|
||||
duration += info.rates[0].PktDuration;
|
||||
if (bf_tail)
|
||||
bf_tail->bf_next = bf;
|
||||
|
@ -2372,8 +2476,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
|
|||
|
||||
if (list_empty(&txq->axq_q)) {
|
||||
txq->axq_link = NULL;
|
||||
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
|
||||
ath_txq_schedule(sc, txq);
|
||||
ath_txq_schedule(sc, txq);
|
||||
break;
|
||||
}
|
||||
bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
|
||||
|
@ -2595,6 +2698,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
|
|||
tid->paused = false;
|
||||
tid->active = false;
|
||||
__skb_queue_head_init(&tid->buf_q);
|
||||
__skb_queue_head_init(&tid->retry_q);
|
||||
acno = TID_TO_WME_AC(tidno);
|
||||
tid->ac = &an->ac[acno];
|
||||
}
|
||||
|
@ -2602,6 +2706,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
|
|||
for (acno = 0, ac = &an->ac[acno];
|
||||
acno < IEEE80211_NUM_ACS; acno++, ac++) {
|
||||
ac->sched = false;
|
||||
ac->clear_ps_filter = true;
|
||||
ac->txq = sc->tx.txq_map[acno];
|
||||
INIT_LIST_HEAD(&ac->tid_q);
|
||||
}
|
||||
|
|
|
@ -11,9 +11,6 @@ wil6210-y += txrx.o
|
|||
wil6210-y += debug.o
|
||||
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
|
||||
|
||||
ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
|
||||
subdir-ccflags-y += -Werror
|
||||
endif
|
||||
# for tracing framework to find trace.h
|
||||
CFLAGS_trace.o := -I$(src)
|
||||
|
||||
|
|
|
@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
|
|||
if ((i % 64) == 0 && (i != 0))
|
||||
seq_printf(s, "\n");
|
||||
seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
|
||||
"S" : (vring->ctx[i] ? "H" : "h"));
|
||||
"S" : (vring->ctx[i].skb ? "H" : "h"));
|
||||
}
|
||||
seq_printf(s, "\n");
|
||||
}
|
||||
|
@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
|
|||
volatile struct vring_tx_desc *d =
|
||||
&(vring->va[dbg_txdesc_index].tx);
|
||||
volatile u32 *u = (volatile u32 *)d;
|
||||
struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
|
||||
struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
|
||||
|
||||
seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
|
||||
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
|
||||
|
|
|
@ -127,6 +127,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
|
|||
|
||||
ndev->netdev_ops = &wil_netdev_ops;
|
||||
ndev->ieee80211_ptr = wdev;
|
||||
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
|
||||
ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
|
||||
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
|
||||
wdev->netdev = ndev;
|
||||
|
||||
|
|
|
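Advertising NETIF_F_HW_CSUM | NETIF_F_RXCSUM in both hw_features and features is what allows the stack to hand CHECKSUM_PARTIAL skbs to wil_tx_vring() and to trust CHECKSUM_UNNECESSARY on receive, matching the txrx.c offload hunks further below. The same pattern as a tiny standalone helper (a sketch, not the driver's code):

#include <linux/netdevice.h>

/* Sketch: advertise Tx and Rx checksum offload on a new net_device. */
static void advertise_csum_offload(struct net_device *ndev)
{
	ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features;
}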
@ -37,36 +37,40 @@ static inline void trace_ ## name(proto) {}
|
|||
#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
|
||||
|
||||
DECLARE_EVENT_CLASS(wil6210_wmi,
|
||||
TP_PROTO(u16 id, void *buf, u16 buf_len),
|
||||
TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
|
||||
|
||||
TP_ARGS(id, buf, buf_len),
|
||||
TP_ARGS(wmi, buf, buf_len),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u8, mid)
|
||||
__field(u16, id)
|
||||
__field(u32, timestamp)
|
||||
__field(u16, buf_len)
|
||||
__dynamic_array(u8, buf, buf_len)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->id = id;
|
||||
__entry->mid = wmi->mid;
|
||||
__entry->id = le16_to_cpu(wmi->id);
|
||||
__entry->timestamp = le32_to_cpu(wmi->timestamp);
|
||||
__entry->buf_len = buf_len;
|
||||
memcpy(__get_dynamic_array(buf), buf, buf_len);
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"id 0x%04x len %d",
|
||||
__entry->id, __entry->buf_len
|
||||
"MID %d id 0x%04x len %d timestamp %d",
|
||||
__entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
|
||||
)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
|
||||
TP_PROTO(u16 id, void *buf, u16 buf_len),
|
||||
TP_ARGS(id, buf, buf_len)
|
||||
TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
|
||||
TP_ARGS(wmi, buf, buf_len)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
|
||||
TP_PROTO(u16 id, void *buf, u16 buf_len),
|
||||
TP_ARGS(id, buf, buf_len)
|
||||
TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
|
||||
TP_ARGS(wmi, buf, buf_len)
|
||||
);
|
||||
|
||||
#define WIL6210_MSG_MAX (200)
|
||||
|
|
|
@ -18,6 +18,9 @@
|
|||
#include <net/ieee80211_radiotap.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <net/ipv6.h>
|
||||
|
||||
#include "wil6210.h"
|
||||
#include "wmi.h"
|
||||
|
@ -70,7 +73,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
|
|||
|
||||
vring->swhead = 0;
|
||||
vring->swtail = 0;
|
||||
vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
|
||||
vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
|
||||
if (!vring->ctx) {
|
||||
vring->va = NULL;
|
||||
return -ENOMEM;
|
||||
|
@ -108,39 +111,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
|
|||
|
||||
while (!wil_vring_is_empty(vring)) {
|
||||
dma_addr_t pa;
|
||||
struct sk_buff *skb;
|
||||
u16 dmalen;
|
||||
struct wil_ctx *ctx;
|
||||
|
||||
if (tx) {
|
||||
struct vring_tx_desc dd, *d = &dd;
|
||||
volatile struct vring_tx_desc *_d =
|
||||
&vring->va[vring->swtail].tx;
|
||||
|
||||
ctx = &vring->ctx[vring->swtail];
|
||||
*d = *_d;
|
||||
pa = wil_desc_addr(&d->dma.addr);
|
||||
dmalen = le16_to_cpu(d->dma.length);
|
||||
skb = vring->ctx[vring->swtail];
|
||||
if (skb) {
|
||||
dma_unmap_single(dev, pa, dmalen,
|
||||
DMA_TO_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
vring->ctx[vring->swtail] = NULL;
|
||||
} else {
|
||||
if (vring->ctx[vring->swtail].mapped_as_page) {
|
||||
dma_unmap_page(dev, pa, dmalen,
|
||||
DMA_TO_DEVICE);
|
||||
} else {
|
||||
dma_unmap_single(dev, pa, dmalen,
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
if (ctx->skb)
|
||||
dev_kfree_skb_any(ctx->skb);
|
||||
vring->swtail = wil_vring_next_tail(vring);
|
||||
} else { /* rx */
|
||||
struct vring_rx_desc dd, *d = &dd;
|
||||
volatile struct vring_rx_desc *_d =
|
||||
&vring->va[vring->swtail].rx;
|
||||
&vring->va[vring->swhead].rx;
|
||||
|
||||
ctx = &vring->ctx[vring->swhead];
|
||||
*d = *_d;
|
||||
pa = wil_desc_addr(&d->dma.addr);
|
||||
dmalen = le16_to_cpu(d->dma.length);
|
||||
skb = vring->ctx[vring->swhead];
|
||||
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
|
||||
kfree_skb(skb);
|
||||
kfree_skb(ctx->skb);
|
||||
wil_vring_advance_head(vring, 1);
|
||||
}
|
||||
}
|
||||
|
@ -187,7 +190,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
|
|||
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
|
||||
d->dma.length = cpu_to_le16(sz);
|
||||
*_d = *d;
|
||||
vring->ctx[i] = skb;
|
||||
vring->ctx[i].skb = skb;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -352,11 +355,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
skb = vring->ctx[vring->swhead];
|
||||
skb = vring->ctx[vring->swhead].skb;
|
||||
d = wil_skb_rxdesc(skb);
|
||||
*d = *_d;
|
||||
pa = wil_desc_addr(&d->dma.addr);
|
||||
vring->ctx[vring->swhead] = NULL;
|
||||
vring->ctx[vring->swhead].skb = NULL;
|
||||
wil_vring_advance_head(vring, 1);
|
||||
|
||||
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
|
||||
|
@ -407,6 +410,21 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* L4 IDENT is on when HW calculated checksum, check status
|
||||
* and in case of error drop the packet
|
||||
* higher stack layers will handle retransmission (if required)
|
||||
*/
|
||||
if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
|
||||
/* L4 protocol identified, csum calculated */
|
||||
if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) {
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
} else {
|
||||
wil_err(wil, "Incorrect checksum reported\n");
|
||||
kfree_skb(skb);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
ds_bits = wil_rxdesc_ds_bits(d);
|
||||
if (ds_bits == 1) {
|
||||
/*
|
||||
|
@ -646,6 +664,53 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
|
||||
struct vring_tx_desc *d,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int protocol;
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
return 0;
|
||||
|
||||
switch (skb->protocol) {
|
||||
case cpu_to_be16(ETH_P_IP):
|
||||
protocol = ip_hdr(skb)->protocol;
|
||||
break;
|
||||
case cpu_to_be16(ETH_P_IPV6):
|
||||
protocol = ipv6_hdr(skb)->nexthdr;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (protocol) {
|
||||
case IPPROTO_TCP:
|
||||
d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
|
||||
/* L4 header len: TCP header length */
|
||||
d->dma.d0 |=
|
||||
(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
|
||||
break;
|
||||
case IPPROTO_UDP:
|
||||
/* L4 header len: UDP header length */
|
||||
d->dma.d0 |=
|
||||
(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
d->dma.ip_length = skb_network_header_len(skb);
|
||||
d->dma.b11 = ETH_HLEN; /* MAC header length */
|
||||
d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
|
||||
/* Enable TCP/UDP checksum */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
|
||||
/* Calculate pseudo-header */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
|
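wil_tx_desc_offload_cksum_set() only acts on CHECKSUM_PARTIAL skbs: it derives the L4 protocol from the IPv4 header or the IPv6 next-header field, programs the L4 header length, and turns on TCP/UDP checksum plus pseudo-header calculation in the Tx descriptor; anything else is rejected and the packet is dropped by the caller. The protocol selection, restated without the descriptor bit layout; the 0/2 encoding follows the DMA_CFG_DESC_TX_0_L4_TYPE comment in txrx.h further below, and the IPPROTO numbers are the standard ones:

/* Sketch: map an L4 protocol number to the descriptor's L4 type encoding. */
static int l4_type_for_proto(int proto, int *l4_type)
{
	switch (proto) {
	case 6:			/* IPPROTO_TCP */
		*l4_type = 2;
		return 0;
	case 17:		/* IPPROTO_UDP */
		*l4_type = 0;
		return 0;
	default:
		return -1;	/* not offloadable; caller drops the frame */
	}
}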
@ -655,7 +720,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
u32 swhead = vring->swhead;
|
||||
int avail = wil_vring_avail_tx(vring);
|
||||
int nr_frags = skb_shinfo(skb)->nr_frags;
|
||||
uint f;
|
||||
uint f = 0;
|
||||
int vring_index = vring - wil->vring_tx;
|
||||
uint i = swhead;
|
||||
dma_addr_t pa;
|
||||
|
@ -686,13 +751,20 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
return -EINVAL;
|
||||
/* 1-st segment */
|
||||
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
|
||||
/* Process TCP/UDP checksum offloading */
|
||||
if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
|
||||
wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
|
||||
vring_index);
|
||||
goto dma_error;
|
||||
}
|
||||
|
||||
d->mac.d[2] |= ((nr_frags + 1) <<
|
||||
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
|
||||
if (nr_frags)
|
||||
*_d = *d;
|
||||
|
||||
/* middle segments */
|
||||
for (f = 0; f < nr_frags; f++) {
|
||||
for (; f < nr_frags; f++) {
|
||||
const struct skb_frag_struct *frag =
|
||||
&skb_shinfo(skb)->frags[f];
|
||||
int len = skb_frag_size(frag);
|
||||
|
@ -703,7 +775,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
if (unlikely(dma_mapping_error(dev, pa)))
|
||||
goto dma_error;
|
||||
wil_tx_desc_map(d, pa, len, vring_index);
|
||||
vring->ctx[i] = NULL;
|
||||
vring->ctx[i].mapped_as_page = 1;
|
||||
*_d = *d;
|
||||
}
|
||||
/* for the last seg only */
|
||||
|
@ -712,6 +784,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
|
||||
*_d = *d;
|
||||
|
||||
/* hold reference to skb
|
||||
* to prevent skb release before accounting
|
||||
* in case of immediate "tx done"
|
||||
*/
|
||||
vring->ctx[i].skb = skb_get(skb);
|
||||
|
||||
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
|
||||
(const void *)d, sizeof(*d), false);
|
||||
|
||||
|
@ -720,29 +798,31 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
|
||||
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
|
||||
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
|
||||
/* hold reference to skb
|
||||
* to prevent skb release before accounting
|
||||
* in case of immediate "tx done"
|
||||
*/
|
||||
vring->ctx[i] = skb_get(skb);
|
||||
|
||||
return 0;
|
||||
dma_error:
|
||||
/* unmap what we have mapped */
|
||||
/* Note: increment @f to operate with positive index */
|
||||
for (f++; f > 0; f--) {
|
||||
nr_frags = f + 1; /* frags mapped + one for skb head */
|
||||
for (f = 0; f < nr_frags; f++) {
|
||||
u16 dmalen;
|
||||
struct wil_ctx *ctx;
|
||||
|
||||
i = (swhead + f) % vring->size;
|
||||
ctx = &vring->ctx[i];
|
||||
_d = &(vring->va[i].tx);
|
||||
*d = *_d;
|
||||
_d->dma.status = TX_DMA_STATUS_DU;
|
||||
pa = wil_desc_addr(&d->dma.addr);
|
||||
dmalen = le16_to_cpu(d->dma.length);
|
||||
if (vring->ctx[i])
|
||||
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
else
|
||||
if (ctx->mapped_as_page)
|
||||
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
else
|
||||
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
|
||||
if (ctx->skb)
|
||||
dev_kfree_skb_any(ctx->skb);
|
||||
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
|
@ -821,8 +901,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
|
|||
&vring->va[vring->swtail].tx;
|
||||
struct vring_tx_desc dd, *d = &dd;
|
||||
dma_addr_t pa;
|
||||
struct sk_buff *skb;
|
||||
u16 dmalen;
|
||||
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
|
||||
struct sk_buff *skb = ctx->skb;
|
||||
|
||||
*d = *_d;
|
||||
|
||||
|
@ -840,7 +921,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
|
|||
(const void *)d, sizeof(*d), false);
|
||||
|
||||
pa = wil_desc_addr(&d->dma.addr);
|
||||
skb = vring->ctx[vring->swtail];
|
||||
if (ctx->mapped_as_page)
|
||||
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
else
|
||||
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
|
||||
if (skb) {
|
||||
if (d->dma.error == 0) {
|
||||
ndev->stats.tx_packets++;
|
||||
|
@ -849,16 +934,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
|
|||
ndev->stats.tx_errors++;
|
||||
}
|
||||
|
||||
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
vring->ctx[vring->swtail] = NULL;
|
||||
} else {
|
||||
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
|
||||
}
|
||||
d->dma.addr.addr_low = 0;
|
||||
d->dma.addr.addr_high = 0;
|
||||
d->dma.length = 0;
|
||||
d->dma.status = TX_DMA_STATUS_DU;
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
/*
|
||||
* There is no need to touch HW descriptor:
|
||||
* - status bit TX_DMA_STATUS_DU is set by design,
|
||||
* so hardware will not try to process this desc.,
|
||||
* - rest of descriptor will be initialized on Tx.
|
||||
*/
|
||||
vring->swtail = wil_vring_next_tail(vring);
|
||||
done++;
|
||||
}
|
||||
|
|
|
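With vring->ctx[] now an array of struct wil_ctx (an skb pointer plus a mapped_as_page bit, defined in the wil6210.h hunk further below), both the dma_error unwind path and wil_tx_complete() can pick the matching unmap call instead of inferring it from a NULL skb. A minimal sketch of that release step under those assumptions; this is not the driver's exact helper:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: release one Tx slot according to how it was mapped. */
static void release_tx_slot(struct device *dev, struct wil_ctx *ctx,
			    dma_addr_t pa, u16 dmalen)
{
	if (ctx->mapped_as_page)
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);

	if (ctx->skb)
		dev_kfree_skb_any(ctx->skb);
	memset(ctx, 0, sizeof(*ctx));
}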
@ -235,7 +235,16 @@ struct vring_tx_mac {
|
|||
|
||||
#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
|
||||
#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
|
||||
#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
|
||||
#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
|
||||
|
||||
|
||||
#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
|
||||
#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
|
||||
#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
|
||||
|
||||
#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
|
||||
#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
|
||||
#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
|
||||
|
||||
|
||||
#define TX_DMA_STATUS_DU BIT(0)
|
||||
|
@ -334,8 +343,17 @@ struct vring_rx_mac {
|
|||
|
||||
#define RX_DMA_D0_CMD_DMA_IT BIT(10)
|
||||
|
||||
/* Error field, offload bits */
|
||||
#define RX_DMA_ERROR_L3_ERR BIT(4)
|
||||
#define RX_DMA_ERROR_L4_ERR BIT(5)
|
||||
|
||||
|
||||
/* Status field */
|
||||
#define RX_DMA_STATUS_DU BIT(0)
|
||||
#define RX_DMA_STATUS_ERROR BIT(2)
|
||||
|
||||
#define RX_DMA_STATUS_L3_IDENT BIT(4)
|
||||
#define RX_DMA_STATUS_L4_IDENT BIT(5)
|
||||
#define RX_DMA_STATUS_PHY_INFO BIT(6)
|
||||
|
||||
struct vring_rx_dma {
|
||||
|
|
|
@ -156,11 +156,22 @@ struct wil6210_mbox_hdr {
|
|||
/* max. value for wil6210_mbox_hdr.len */
|
||||
#define MAX_MBOXITEM_SIZE (240)
|
||||
|
||||
/**
|
||||
* struct wil6210_mbox_hdr_wmi - WMI header
|
||||
*
|
||||
* @mid: MAC ID
|
||||
* 00 - default, created by FW
|
||||
* 01..0f - WiFi ports, driver to create
|
||||
* 10..fe - debug
|
||||
* ff - broadcast
|
||||
* @id: command/event ID
|
||||
* @timestamp: FW fills for events, free-running msec timer
|
||||
*/
|
||||
struct wil6210_mbox_hdr_wmi {
|
||||
u8 reserved0[2];
|
||||
u8 mid;
|
||||
u8 reserved;
|
||||
__le16 id;
|
||||
__le16 info1; /* bits [0..3] - device_id, rest - unused */
|
||||
u8 reserved1[2];
|
||||
__le32 timestamp;
|
||||
} __packed;
|
||||
|
||||
struct pending_wmi_event {
|
||||
|
@ -172,6 +183,14 @@ struct pending_wmi_event {
|
|||
} __packed event;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct wil_ctx - software context for Vring descriptor
|
||||
*/
|
||||
struct wil_ctx {
|
||||
struct sk_buff *skb;
|
||||
u8 mapped_as_page:1;
|
||||
};
|
||||
|
||||
union vring_desc;
|
||||
|
||||
struct vring {
|
||||
|
@ -181,7 +200,7 @@ struct vring {
|
|||
u32 swtail;
|
||||
u32 swhead;
|
||||
u32 hwtail; /* write here to inform hw */
|
||||
void **ctx; /* void *ctx[size] - software context */
|
||||
struct wil_ctx *ctx; /* ctx[size] - software context */
|
||||
};
|
||||
|
||||
enum { /* for wil6210_priv.status */
|
||||
|
|
|
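The WMI trace events now receive the whole struct wil6210_mbox_hdr_wmi, so the MAC ID and the firmware timestamp are recorded alongside the command/event ID; the wmi.c hunks below pass &cmd.wmi on the send path and the received header (payload at &wmi[1]) on the event path. A small sketch of pulling out the three traced fields, matching the header layout documented above; the byte-order helpers are the usual kernel ones:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: the fields the reworked trace class records. */
static void wmi_hdr_fields(const struct wil6210_mbox_hdr_wmi *wmi,
			   u8 *mid, u16 *id, u32 *timestamp)
{
	*mid = wmi->mid;
	*id = le16_to_cpu(wmi->id);
	*timestamp = le32_to_cpu(wmi->timestamp);
}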
@ -172,8 +172,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
|
|||
.len = cpu_to_le16(sizeof(cmd.wmi) + len),
|
||||
},
|
||||
.wmi = {
|
||||
.mid = 0,
|
||||
.id = cpu_to_le16(cmdid),
|
||||
.info1 = 0,
|
||||
},
|
||||
};
|
||||
struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
|
||||
|
@ -248,7 +248,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
|
|||
iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, tx.head));
|
||||
|
||||
trace_wil6210_wmi_cmd(cmdid, buf, len);
|
||||
trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
|
||||
|
||||
/* interrupt to FW */
|
||||
iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
|
||||
|
@ -640,9 +640,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
|||
hdr.flags);
|
||||
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
|
||||
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
|
||||
u16 id = le16_to_cpu(evt->event.wmi.id);
|
||||
wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
|
||||
trace_wil6210_wmi_event(id, &evt->event.wmi, len);
|
||||
struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
|
||||
u16 id = le16_to_cpu(wmi->id);
|
||||
u32 tstamp = le32_to_cpu(wmi->timestamp);
|
||||
wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
|
||||
id, wmi->mid, tstamp);
|
||||
trace_wil6210_wmi_event(wmi, &wmi[1],
|
||||
len - sizeof(*wmi));
|
||||
}
|
||||
wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
|
||||
&evt->event.hdr, sizeof(hdr) + len, true);
|
||||
|
@ -920,6 +924,12 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
|
|||
cmd.sniffer_cfg.phy_support =
|
||||
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
|
||||
? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
|
||||
} else {
|
||||
/* Initialize offload (in non-sniffer mode).
|
||||
* Linux IP stack always calculates IP checksum
|
||||
* HW always calculates TCP/UDP checksum
|
||||
*/
|
||||
cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
|
||||
}
|
||||
/* typical time for secure PCP is 840ms */
|
||||
rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
|
||||
|
|
|
@ -4164,9 +4164,7 @@ static struct cfg80211_ops wl_cfg80211_ops = {
|
|||
.stop_p2p_device = brcmf_p2p_stop_device,
|
||||
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
|
||||
.crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
|
||||
#ifdef CONFIG_NL80211_TESTMODE
|
||||
.testmode_cmd = brcmf_cfg80211_testmode
|
||||
#endif
|
||||
CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
|
||||
};
|
||||
|
||||
static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
|
||||
|
|
|
@@ -928,9 +928,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
		}
	} else if (txs->phyerr) {
		update_rate = false;
		brcms_err(wlc->hw->d11core,
			  "%s: ampdu tx phy error (0x%x)\n",
			  __func__, txs->phyerr);
		brcms_dbg_ht(wlc->hw->d11core,
			     "%s: ampdu tx phy error (0x%x)\n",
			     __func__, txs->phyerr);
	}
}

@@ -882,8 +882,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
	mcl = le16_to_cpu(txh->MacTxControlLow);

	if (txs->phyerr)
		brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
			  txs->phyerr, txh->MainRates);
		brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
			     txs->phyerr, txh->MainRates);

	if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
		brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");

@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
	if (!priv->join_status)
		goto done;

	if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
		wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
			  priv->join_status);
		BUG_ON(1);
	}
	if (priv->join_status == CW1200_JOIN_STATUS_AP)
		goto done;

	cancel_work_sync(&priv->update_filtering_work);
	cancel_work_sync(&priv->set_beacon_wakeup_period_work);

@@ -832,7 +832,7 @@ struct wsm_tx {
	/* the MSDU shall be terminated. Overrides the global */
	/* dot11MaxTransmitMsduLifeTime setting [optional] */
	/* Device will set the default value if this is 0. */
	u32 expire_time;
	__le32 expire_time;

	/* WSM_HT_TX_... */
	__le32 ht_tx_parameters;

@@ -667,7 +667,7 @@ static int prism2_open(struct net_device *dev)
	if (local->no_pri) {
		printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
		       "f/w\n", dev->name);
		return 1;
		return -ENODEV;
	}

	if ((local->func->card_present && !local->func->card_present(local)) ||

@@ -682,7 +682,7 @@ static int prism2_open(struct net_device *dev)
		printk(KERN_WARNING "%s: could not enable MAC port\n",
		       dev->name);
		prism2_close(dev);
		return 1;
		return -ENODEV;
	}
	if (!local->dev_enabled)
		prism2_callback(local, PRISM2_CALLBACK_ENABLE);

@@ -887,6 +887,7 @@ il3945_remove_debugfs(void *il, void *il_sta)
 */
static void
il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
			 struct cfg80211_chan_def *chandef,
			 struct ieee80211_sta *sta, void *il_sta)
{
}

@@ -475,6 +475,8 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
	}
}

#define SMALL_PACKET_SIZE 256

static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)

@@ -483,14 +485,13 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	u32 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;

	/* We received data from the HW, so stop the watchdog */
	if (unlikely
	    (len + IL39_RX_FRAME_SIZE >
	     PAGE_SIZE << il->hw_params.rx_page_order)) {
	if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
		D_DROP("Corruption detected!\n");
		return;
	}

@@ -506,26 +507,32 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	skb = dev_alloc_skb(128);
	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (!il3945_mod_params.sw_crypto)
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
				      le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len,
			len);

	/* If frame is small enough to fit into skb->head, copy it
	 * and do not consume a full page
	 */
	if (len <= SMALL_PACKET_SIZE) {
		memcpy(skb_put(skb, len), rx_hdr->payload, len);
	} else {
		skb_add_rx_frag(skb, 0, rxb->page,
				(void *)rx_hdr->payload - (void *)pkt, len,
				fraglen);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}
	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	il->alloc_rxb_page--;
	rxb->page = NULL;
}

#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)

@@ -574,9 +574,11 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
	return decrypt_out;
}

#define SMALL_PACKET_SIZE 256

static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;

@@ -598,21 +600,25 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(128);
	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
			len);
	if (len <= SMALL_PACKET_SIZE) {
		memcpy(skb_put(skb, len), hdr, len);
	} else {
		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
				len, PAGE_SIZE << il->hw_params.rx_page_order);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	il->alloc_rxb_page--;
	rxb->page = NULL;
}

/* Called for N_RX (legacy ABG frames), or

@@ -4460,12 +4466,12 @@ il4965_irq_tasklet(struct il_priv *il)
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RFKILL, &il->status);
			else
				clear_bit(S_RFKILL, &il->status);
			if (hw_rf_kill) {
				set_bit(S_RFKILL, &il->status);
			} else {
				clear_bit(S_RFKILL, &il->status);
				wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
				il_force_reset(il, true);
			}

		handled |= CSR_INT_BIT_RF_KILL;

@@ -5334,6 +5340,9 @@ il4965_alive_start(struct il_priv *il)

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;

@@ -5364,9 +5373,6 @@ il4965_alive_start(struct il_priv *il)
	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	return;

restart:

@@ -2803,6 +2803,7 @@ il4965_rs_remove_debugfs(void *il, void *il_sta)
 */
static void
il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
			 struct cfg80211_chan_def *chandef,
			 struct ieee80211_sta *sta, void *il_sta)
{
}

@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)

	return 0;
}
EXPORT_SYMBOL(il_force_reset);

int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,

@@ -127,20 +127,3 @@ config IWLWIFI_DEVICE_TRACING
	  If unsure, say Y so we can help you better when problems
	  occur.
endmenu

config IWLWIFI_P2P
	def_bool y
	bool "iwlwifi experimental P2P support"
	depends on IWLWIFI
	help
	  This option enables experimental P2P support for some devices
	  based on microcode support. Since P2P support is still under
	  development, this option may even enable it for some devices
	  now that turn out to not support it in the future due to
	  microcode restrictions.

	  To determine if your microcode supports the experimental P2P
	  offered by this option, check if the driver advertises AP
	  support when it is loaded.

	  Say Y only if you want to experiment with P2P.

@@ -106,7 +106,6 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
#define STATUS_CHANNEL_SWITCH_PENDING 11
#define STATUS_SCAN_COMPLETE 12
#define STATUS_POWER_PMI 13
#define STATUS_SCAN_ROC_EXPIRED 14

struct iwl_ucode_capabilities;

@@ -250,7 +249,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);

/* scan */
void iwlagn_post_scan(struct iwl_priv *priv);
void iwlagn_disable_roc(struct iwl_priv *priv);
int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);

@@ -265,10 +263,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
				   enum iwl_scan_type scan_type,
				   enum ieee80211_band band);

void iwl_scan_roc_expired(struct iwl_priv *priv);
void iwl_scan_offchannel_skb(struct iwl_priv *priv);
void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);

/* For faster active scanning, scan will move to the next channel if fewer than
 * PLCP_QUIET_THRESH packets are heard on this channel within
 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell

@ -69,19 +69,7 @@
|
|||
} while (0)
|
||||
|
||||
/* file operation */
|
||||
#define DEBUGFS_READ_FUNC(name) \
|
||||
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
|
||||
char __user *user_buf, \
|
||||
size_t count, loff_t *ppos);
|
||||
|
||||
#define DEBUGFS_WRITE_FUNC(name) \
|
||||
static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
|
||||
const char __user *user_buf, \
|
||||
size_t count, loff_t *ppos);
|
||||
|
||||
|
||||
#define DEBUGFS_READ_FILE_OPS(name) \
|
||||
DEBUGFS_READ_FUNC(name); \
|
||||
static const struct file_operations iwl_dbgfs_##name##_ops = { \
|
||||
.read = iwl_dbgfs_##name##_read, \
|
||||
.open = simple_open, \
|
||||
|
@ -89,7 +77,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
|
|||
};
|
||||
|
||||
#define DEBUGFS_WRITE_FILE_OPS(name) \
|
||||
DEBUGFS_WRITE_FUNC(name); \
|
||||
static const struct file_operations iwl_dbgfs_##name##_ops = { \
|
||||
.write = iwl_dbgfs_##name##_write, \
|
||||
.open = simple_open, \
|
||||
|
@ -98,8 +85,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
|
|||
|
||||
|
||||
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
|
||||
DEBUGFS_READ_FUNC(name); \
|
||||
DEBUGFS_WRITE_FUNC(name); \
|
||||
static const struct file_operations iwl_dbgfs_##name##_ops = { \
|
||||
.write = iwl_dbgfs_##name##_write, \
|
||||
.read = iwl_dbgfs_##name##_read, \
|
||||
|
|
|
@ -540,7 +540,6 @@ struct iwl_rxon_context {
|
|||
enum iwl_scan_type {
|
||||
IWL_SCAN_NORMAL,
|
||||
IWL_SCAN_RADIO_RESET,
|
||||
IWL_SCAN_ROC,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -825,12 +824,6 @@ struct iwl_priv {
|
|||
struct reply_tx_error_statistics reply_tx_stats;
|
||||
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
|
||||
|
||||
/* remain-on-channel offload support */
|
||||
struct ieee80211_channel *hw_roc_channel;
|
||||
struct delayed_work hw_roc_disable_work;
|
||||
int hw_roc_duration;
|
||||
bool hw_roc_setup, hw_roc_start_notified;
|
||||
|
||||
/* bt coex */
|
||||
u8 bt_enable_flag;
|
||||
u8 bt_status;
|
||||
|
|
|
@ -76,29 +76,6 @@ static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
|
|||
},
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
|
||||
{
|
||||
.max = 1,
|
||||
.types = BIT(NL80211_IFTYPE_STATION),
|
||||
},
|
||||
{
|
||||
.max = 1,
|
||||
.types = BIT(NL80211_IFTYPE_P2P_GO) |
|
||||
BIT(NL80211_IFTYPE_AP),
|
||||
},
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
|
||||
{
|
||||
.max = 2,
|
||||
.types = BIT(NL80211_IFTYPE_STATION),
|
||||
},
|
||||
{
|
||||
.max = 1,
|
||||
.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
|
||||
},
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_combination
|
||||
iwlagn_iface_combinations_dualmode[] = {
|
||||
{ .num_different_channels = 1,
|
||||
|
@ -114,21 +91,6 @@ iwlagn_iface_combinations_dualmode[] = {
|
|||
},
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_combination
|
||||
iwlagn_iface_combinations_p2p[] = {
|
||||
{ .num_different_channels = 1,
|
||||
.max_interfaces = 2,
|
||||
.beacon_int_infra_match = true,
|
||||
.limits = iwlagn_p2p_sta_go_limits,
|
||||
.n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
|
||||
},
|
||||
{ .num_different_channels = 1,
|
||||
.max_interfaces = 2,
|
||||
.limits = iwlagn_p2p_2sta_limits,
|
||||
.n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* Not a mac80211 entry point function, but it fits in with all the
|
||||
* other mac80211 functions grouped here.
|
||||
|
@ -186,19 +148,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
|
|||
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
|
||||
if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
|
||||
hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
|
||||
hw->wiphy->n_iface_combinations =
|
||||
ARRAY_SIZE(iwlagn_iface_combinations_p2p);
|
||||
} else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
|
||||
if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
|
||||
hw->wiphy->iface_combinations =
|
||||
iwlagn_iface_combinations_dualmode;
|
||||
hw->wiphy->n_iface_combinations =
|
||||
ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
|
||||
}
|
||||
|
||||
hw->wiphy->max_remain_on_channel_duration = 500;
|
||||
|
||||
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
|
||||
WIPHY_FLAG_DISABLE_BEACON_HINTS |
|
||||
WIPHY_FLAG_IBSS_RSN;
|
||||
|
@ -1156,126 +1112,6 @@ done:
|
|||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
}
|
||||
|
||||
static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_channel *channel,
|
||||
int duration,
|
||||
enum ieee80211_roc_type type)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
|
||||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
int err = 0;
|
||||
|
||||
if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
IWL_DEBUG_MAC80211(priv, "enter\n");
|
||||
mutex_lock(&priv->mutex);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
/* mac80211 should not scan while ROC or ROC while scanning */
|
||||
if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
|
||||
err = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
iwl_scan_cancel_timeout(priv, 100);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
err = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
priv->hw_roc_channel = channel;
|
||||
/* convert from ms to TU */
|
||||
priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
|
||||
priv->hw_roc_start_notified = false;
|
||||
cancel_delayed_work(&priv->hw_roc_disable_work);
|
||||
|
||||
if (!ctx->is_active) {
|
||||
static const struct iwl_qos_info default_qos_data = {
|
||||
.def_qos_parm = {
|
||||
.ac[0] = {
|
||||
.cw_min = cpu_to_le16(3),
|
||||
.cw_max = cpu_to_le16(7),
|
||||
.aifsn = 2,
|
||||
.edca_txop = cpu_to_le16(1504),
|
||||
},
|
||||
.ac[1] = {
|
||||
.cw_min = cpu_to_le16(7),
|
||||
.cw_max = cpu_to_le16(15),
|
||||
.aifsn = 2,
|
||||
.edca_txop = cpu_to_le16(3008),
|
||||
},
|
||||
.ac[2] = {
|
||||
.cw_min = cpu_to_le16(15),
|
||||
.cw_max = cpu_to_le16(1023),
|
||||
.aifsn = 3,
|
||||
},
|
||||
.ac[3] = {
|
||||
.cw_min = cpu_to_le16(15),
|
||||
.cw_max = cpu_to_le16(1023),
|
||||
.aifsn = 7,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
ctx->is_active = true;
|
||||
ctx->qos_data = default_qos_data;
|
||||
ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
|
||||
memcpy(ctx->staging.node_addr,
|
||||
priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
|
||||
ETH_ALEN);
|
||||
memcpy(ctx->staging.bssid_addr,
|
||||
priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
|
||||
ETH_ALEN);
|
||||
err = iwlagn_commit_rxon(priv, ctx);
|
||||
if (err)
|
||||
goto out;
|
||||
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
|
||||
RXON_FILTER_PROMISC_MSK |
|
||||
RXON_FILTER_CTL2HOST_MSK;
|
||||
|
||||
err = iwlagn_commit_rxon(priv, ctx);
|
||||
if (err) {
|
||||
iwlagn_disable_roc(priv);
|
||||
goto out;
|
||||
}
|
||||
priv->hw_roc_setup = true;
|
||||
}
|
||||
|
||||
err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
|
||||
if (err)
|
||||
iwlagn_disable_roc(priv);
|
||||
|
||||
out:
|
||||
mutex_unlock(&priv->mutex);
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
|
||||
|
||||
if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
IWL_DEBUG_MAC80211(priv, "enter\n");
|
||||
mutex_lock(&priv->mutex);
|
||||
iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
|
||||
iwlagn_disable_roc(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
enum ieee80211_rssi_event rssi_event)
|
||||
|
@ -1431,12 +1267,8 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
|
|||
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
|
||||
viftype, vif->addr);
|
||||
|
||||
cancel_delayed_work_sync(&priv->hw_roc_disable_work);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
|
||||
iwlagn_disable_roc(priv);
|
||||
|
||||
if (!iwl_is_ready_rf(priv)) {
|
||||
IWL_WARN(priv, "Try to add interface when device not ready\n");
|
||||
err = -EINVAL;
|
||||
|
@ -1763,8 +1595,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
|
|||
.channel_switch = iwlagn_mac_channel_switch,
|
||||
.flush = iwlagn_mac_flush,
|
||||
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
|
||||
.remain_on_channel = iwlagn_mac_remain_on_channel,
|
||||
.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
|
||||
.rssi_callback = iwlagn_mac_rssi_callback,
|
||||
.set_tim = iwlagn_mac_set_tim,
|
||||
};
|
||||
|
|
|
@ -587,11 +587,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
|
|||
priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
|
||||
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
|
||||
|
||||
if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
|
||||
priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
|
||||
BIT(NL80211_IFTYPE_P2P_CLIENT) |
|
||||
BIT(NL80211_IFTYPE_P2P_GO);
|
||||
|
||||
priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
|
||||
priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
|
||||
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
|
||||
|
@ -854,14 +849,6 @@ void iwl_down(struct iwl_priv *priv)
|
|||
|
||||
iwl_scan_cancel_timeout(priv, 200);
|
||||
|
||||
/*
|
||||
* If active, scanning won't cancel it, so say it expired.
|
||||
* No race since we hold the mutex here and a new one
|
||||
* can't come in at this time.
|
||||
*/
|
||||
if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
|
||||
ieee80211_remain_on_channel_expired(priv->hw);
|
||||
|
||||
exit_pending =
|
||||
test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
|
||||
|
||||
|
@ -1002,41 +989,6 @@ static void iwl_bg_restart(struct work_struct *data)
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
void iwlagn_disable_roc(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
if (!priv->hw_roc_setup)
|
||||
return;
|
||||
|
||||
ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
priv->hw_roc_channel = NULL;
|
||||
|
||||
memset(ctx->staging.node_addr, 0, ETH_ALEN);
|
||||
|
||||
iwlagn_commit_rxon(priv, ctx);
|
||||
|
||||
ctx->is_active = false;
|
||||
priv->hw_roc_setup = false;
|
||||
}
|
||||
|
||||
static void iwlagn_disable_roc_work(struct work_struct *work)
|
||||
{
|
||||
struct iwl_priv *priv = container_of(work, struct iwl_priv,
|
||||
hw_roc_disable_work.work);
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
iwlagn_disable_roc(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
}
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* driver setup and teardown
|
||||
|
@ -1053,8 +1005,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
|
|||
INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
|
||||
INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
|
||||
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
|
||||
INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
|
||||
iwlagn_disable_roc_work);
|
||||
|
||||
iwl_setup_scan_deferred_work(priv);
|
||||
|
||||
|
@ -1082,7 +1032,6 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
|
|||
|
||||
cancel_work_sync(&priv->bt_full_concurrency);
|
||||
cancel_work_sync(&priv->bt_runtime_config);
|
||||
cancel_delayed_work_sync(&priv->hw_roc_disable_work);
|
||||
|
||||
del_timer_sync(&priv->statistics_periodic);
|
||||
del_timer_sync(&priv->ucode_trace);
|
||||
|
@ -1169,12 +1118,6 @@ static void iwl_option_config(struct iwl_priv *priv)
|
|||
#else
|
||||
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_P2P
|
||||
IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
|
||||
#else
|
||||
IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
|
||||
|
@ -1315,10 +1258,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|||
|
||||
ucode_flags = fw->ucode_capa.flags;
|
||||
|
||||
#ifndef CONFIG_IWLWIFI_P2P
|
||||
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
|
||||
#endif
|
||||
|
||||
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
|
||||
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
|
||||
trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
|
||||
|
@ -1413,7 +1352,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|||
* if not PAN, then don't support P2P -- might be a uCode
|
||||
* packaging bug or due to the eeprom check above
|
||||
*/
|
||||
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
|
||||
priv->sta_key_max_num = STA_KEY_MAX_NUM;
|
||||
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
|
||||
|
||||
|
|
|
@ -2826,9 +2826,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
|
|||
|
||||
lq_sta->flush_timer = 0;
|
||||
lq_sta->supp_rates = sta->supp_rates[sband->band];
|
||||
for (j = 0; j < LQ_SIZE; j++)
|
||||
for (i = 0; i < IWL_RATE_COUNT; i++)
|
||||
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
|
||||
|
||||
IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
|
||||
sta_id);
|
||||
|
@ -3319,7 +3316,8 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
|
|||
* station is added we ignore it.
|
||||
*/
|
||||
static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
|
||||
struct ieee80211_sta *sta, void *priv_sta)
|
||||
struct cfg80211_chan_def *chandef,
|
||||
struct ieee80211_sta *sta, void *priv_sta)
|
||||
{
|
||||
}
|
||||
static struct rate_control_ops rs_ops = {
|
||||
|
|
|
@ -564,11 +564,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
|
|||
cmd.slots[0].type = 0; /* BSS */
|
||||
cmd.slots[1].type = 1; /* PAN */
|
||||
|
||||
if (priv->hw_roc_setup) {
|
||||
/* both contexts must be used for this to happen */
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
slot0 = 3000;
|
||||
} else if (ctx_bss->vif && ctx_pan->vif) {
|
||||
if (ctx_bss->vif && ctx_pan->vif) {
|
||||
int bcnint = ctx_pan->beacon_int;
|
||||
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
|
||||
|
||||
|
|
|
@ -100,9 +100,6 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
|
|||
ieee80211_scan_completed(priv->hw, aborted);
|
||||
}
|
||||
|
||||
if (priv->scan_type == IWL_SCAN_ROC)
|
||||
iwl_scan_roc_expired(priv);
|
||||
|
||||
priv->scan_type = IWL_SCAN_NORMAL;
|
||||
priv->scan_vif = NULL;
|
||||
priv->scan_request = NULL;
|
||||
|
@ -130,9 +127,6 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
|
|||
goto out_settings;
|
||||
}
|
||||
|
||||
if (priv->scan_type == IWL_SCAN_ROC)
|
||||
iwl_scan_roc_expired(priv);
|
||||
|
||||
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
|
||||
int err;
|
||||
|
||||
|
@ -284,12 +278,6 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
|
|||
le32_to_cpu(notif->tsf_low),
|
||||
notif->status, notif->beacon_timer);
|
||||
|
||||
if (priv->scan_type == IWL_SCAN_ROC &&
|
||||
!priv->hw_roc_start_notified) {
|
||||
ieee80211_ready_on_channel(priv->hw);
|
||||
priv->hw_roc_start_notified = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -697,8 +685,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
|
||||
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
|
||||
|
||||
if (priv->scan_type != IWL_SCAN_ROC &&
|
||||
iwl_is_any_associated(priv)) {
|
||||
if (iwl_is_any_associated(priv)) {
|
||||
u16 interval = 0;
|
||||
u32 extra;
|
||||
u32 suspend_time = 100;
|
||||
|
@ -706,9 +693,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
|
||||
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
|
||||
switch (priv->scan_type) {
|
||||
case IWL_SCAN_ROC:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
case IWL_SCAN_RADIO_RESET:
|
||||
interval = 0;
|
||||
break;
|
||||
|
@ -728,11 +712,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
scan->suspend_time = cpu_to_le32(scan_suspend_time);
|
||||
IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
|
||||
scan_suspend_time, interval);
|
||||
} else if (priv->scan_type == IWL_SCAN_ROC) {
|
||||
scan->suspend_time = 0;
|
||||
scan->max_out_time = 0;
|
||||
scan->quiet_time = 0;
|
||||
scan->quiet_plcp_th = 0;
|
||||
}
|
||||
|
||||
switch (priv->scan_type) {
|
||||
|
@ -774,9 +753,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
} else
|
||||
IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
|
||||
break;
|
||||
case IWL_SCAN_ROC:
|
||||
IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
|
||||
break;
|
||||
}
|
||||
|
||||
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
|
||||
|
@ -898,7 +874,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
scan_cmd_size - sizeof(*scan));
|
||||
break;
|
||||
case IWL_SCAN_RADIO_RESET:
|
||||
case IWL_SCAN_ROC:
|
||||
/* use bcast addr, will not be transmitted but must be valid */
|
||||
cmd_len = iwl_fill_probe_req(
|
||||
(struct ieee80211_mgmt *)scan->data,
|
||||
|
@ -926,46 +901,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
is_active, n_probes,
|
||||
(void *)&scan->data[cmd_len]);
|
||||
break;
|
||||
case IWL_SCAN_ROC: {
|
||||
struct iwl_scan_channel *scan_ch;
|
||||
int n_chan, i;
|
||||
u16 dwell;
|
||||
|
||||
dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
|
||||
n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
|
||||
|
||||
scan->channel_count = n_chan;
|
||||
|
||||
scan_ch = (void *)&scan->data[cmd_len];
|
||||
|
||||
for (i = 0; i < n_chan; i++) {
|
||||
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
|
||||
scan_ch->channel =
|
||||
cpu_to_le16(priv->hw_roc_channel->hw_value);
|
||||
|
||||
if (i == n_chan - 1)
|
||||
dwell = priv->hw_roc_duration - i * dwell;
|
||||
|
||||
scan_ch->active_dwell =
|
||||
scan_ch->passive_dwell = cpu_to_le16(dwell);
|
||||
|
||||
/* Set txpower levels to defaults */
|
||||
scan_ch->dsp_atten = 110;
|
||||
|
||||
/* NOTE: if we were doing 6Mb OFDM for scans we'd use
|
||||
* power level:
|
||||
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
|
||||
*/
|
||||
if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
|
||||
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
|
||||
else
|
||||
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
|
||||
|
||||
scan_ch++;
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (scan->channel_count == 0) {
|
||||
|
@ -1035,7 +970,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
|
|||
|
||||
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
|
||||
scan_type == IWL_SCAN_NORMAL ? "" :
|
||||
scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
|
||||
"internal short ");
|
||||
|
||||
set_bit(STATUS_SCANNING, &priv->status);
|
||||
|
@ -1149,40 +1083,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
|
|||
mutex_unlock(&priv->mutex);
|
||||
}
|
||||
}
|
||||
|
||||
void iwl_scan_roc_expired(struct iwl_priv *priv)
|
||||
{
|
||||
/*
|
||||
* The status bit should be set here, to prevent a race
|
||||
* where the atomic_read returns 1, but before the execution continues
|
||||
* iwl_scan_offchannel_skb_status() checks if the status bit is set
|
||||
*/
|
||||
set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
|
||||
|
||||
if (atomic_read(&priv->num_aux_in_flight) == 0) {
|
||||
ieee80211_remain_on_channel_expired(priv->hw);
|
||||
priv->hw_roc_channel = NULL;
|
||||
schedule_delayed_work(&priv->hw_roc_disable_work,
|
||||
10 * HZ);
|
||||
|
||||
clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
|
||||
} else {
|
||||
IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
|
||||
atomic_read(&priv->num_aux_in_flight));
|
||||
}
|
||||
}
|
||||
|
||||
void iwl_scan_offchannel_skb(struct iwl_priv *priv)
|
||||
{
|
||||
WARN_ON(!priv->hw_roc_start_notified);
|
||||
atomic_inc(&priv->num_aux_in_flight);
|
||||
}
|
||||
|
||||
void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
|
||||
{
|
||||
if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
|
||||
test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
|
||||
IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
|
||||
iwl_scan_roc_expired(priv);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -478,9 +478,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
|
|||
if (sta_priv && sta_priv->client && !is_agg)
|
||||
atomic_inc(&sta_priv->pending_frames);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
|
||||
iwl_scan_offchannel_skb(priv);
|
||||
|
||||
return 0;
|
||||
|
||||
drop_unlock_sta:
|
||||
|
@ -1158,7 +1155,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
|||
struct sk_buff *skb;
|
||||
struct iwl_rxon_context *ctx;
|
||||
bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
|
||||
bool is_offchannel_skb;
|
||||
|
||||
tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
|
||||
IWLAGN_TX_RES_TID_POS;
|
||||
|
@ -1178,8 +1174,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
|||
|
||||
__skb_queue_head_init(&skbs);
|
||||
|
||||
is_offchannel_skb = false;
|
||||
|
||||
if (tx_resp->frame_count == 1) {
|
||||
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
|
||||
next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
|
||||
|
@ -1256,8 +1250,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
|||
if (!is_agg)
|
||||
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
|
||||
|
||||
is_offchannel_skb =
|
||||
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
|
||||
freed++;
|
||||
}
|
||||
|
||||
|
@ -1271,14 +1263,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
|||
if (!is_agg && freed != 1)
|
||||
IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
|
||||
|
||||
/*
|
||||
* An offchannel frame can be send only on the AUX queue, where
|
||||
* there is no aggregation (and reordering) so it only is single
|
||||
* skb is expected to be processed.
|
||||
*/
|
||||
if (is_offchannel_skb && freed != 1)
|
||||
IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
|
||||
|
||||
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
|
||||
iwl_get_tx_fail_reason(status), status);
|
||||
|
||||
|
@ -1298,9 +1282,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
|
|||
ieee80211_tx_status_ni(priv->hw, skb);
|
||||
}
|
||||
|
||||
if (is_offchannel_skb)
|
||||
iwl_scan_offchannel_skb_status(priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -99,6 +99,7 @@ static const struct iwl_base_params iwl7000_base_params = {
|
|||
.wd_timeout = IWL_LONG_WD_TIMEOUT,
|
||||
.max_event_log_size = 512,
|
||||
.shadow_reg_enable = true,
|
||||
.pcie_l1_allowed = true,
|
||||
};
|
||||
|
||||
static const struct iwl_ht_params iwl7000_ht_params = {
|
||||
|
@ -126,6 +127,16 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
|
|||
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 7260",
|
||||
.fw_name_pre = IWL7260_FW_PRE,
|
||||
IWL_DEVICE_7000,
|
||||
.ht_params = &iwl7000_ht_params,
|
||||
.nvm_ver = IWL7260_NVM_VERSION,
|
||||
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
|
||||
.high_temp = true,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl7260_2n_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless N 7260",
|
||||
.fw_name_pre = IWL7260_FW_PRE,
|
||||
|
|
|
@ -152,6 +152,7 @@ struct iwl_base_params {
|
|||
unsigned int wd_timeout;
|
||||
u32 max_event_log_size;
|
||||
const bool shadow_reg_enable;
|
||||
const bool pcie_l1_allowed;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -205,6 +206,7 @@ struct iwl_eeprom_params {
|
|||
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
|
||||
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
|
||||
* @internal_wimax_coex: internal wifi/wimax combo device
|
||||
* @high_temp: Is this NIC is designated to be in high temperature.
|
||||
*
|
||||
* We enable the driver to be backward compatible wrt. hardware features.
|
||||
* API differences in uCode shouldn't be handled here but through TLVs
|
||||
|
@ -233,6 +235,7 @@ struct iwl_cfg {
|
|||
enum iwl_led_mode led_mode;
|
||||
const bool rx_with_siso_diversity;
|
||||
const bool internal_wimax_coex;
|
||||
bool high_temp;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -283,6 +286,7 @@ extern const struct iwl_cfg iwl135_bgn_cfg;
|
|||
#endif /* CONFIG_IWLDVM */
|
||||
#if IS_ENABLED(CONFIG_IWLMVM)
|
||||
extern const struct iwl_cfg iwl7260_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
|
||||
extern const struct iwl_cfg iwl7260_2n_cfg;
|
||||
extern const struct iwl_cfg iwl7260_n_cfg;
|
||||
extern const struct iwl_cfg iwl3160_2ac_cfg;
|
||||
|
|
|
@ -74,13 +74,22 @@
|
|||
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
|
||||
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
|
||||
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
|
||||
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
|
||||
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
|
||||
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
|
||||
* (rather than two) IPv6 addresses
|
||||
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
|
||||
*/
|
||||
enum iwl_ucode_tlv_flag {
|
||||
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
|
||||
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
|
||||
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
|
||||
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
|
||||
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
|
||||
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
|
||||
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
|
||||
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
|
||||
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
|
||||
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
|
||||
IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
|
||||
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
|
||||
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
|
||||
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
|
||||
};
|
||||
|
||||
/* The default calibrate table size if not specified by firmware file */
|
||||
|
|
|
@ -33,6 +33,8 @@
|
|||
#include "iwl-io.h"
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-debug.h"
|
||||
#include "iwl-fh.h"
|
||||
#include "iwl-csr.h"
|
||||
|
||||
#define IWL_POLL_INTERVAL 10 /* microseconds */
|
||||
|
||||
|
@ -166,3 +168,68 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
|
|||
}
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
|
||||
|
||||
static const char *get_fh_string(int cmd)
|
||||
{
|
||||
#define IWL_CMD(x) case x: return #x
|
||||
switch (cmd) {
|
||||
IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
|
||||
IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
|
||||
IWL_CMD(FH_RSCSR_CHNL0_WPTR);
|
||||
IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
|
||||
IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
|
||||
IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
|
||||
IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
|
||||
IWL_CMD(FH_TSSR_TX_STATUS_REG);
|
||||
IWL_CMD(FH_TSSR_TX_ERROR_REG);
|
||||
default:
|
||||
return "UNKNOWN";
|
||||
}
|
||||
#undef IWL_CMD
|
||||
}
|
||||
|
||||
int iwl_dump_fh(struct iwl_trans *trans, char **buf)
|
||||
{
|
||||
int i;
|
||||
static const u32 fh_tbl[] = {
|
||||
FH_RSCSR_CHNL0_STTS_WPTR_REG,
|
||||
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
|
||||
FH_RSCSR_CHNL0_WPTR,
|
||||
FH_MEM_RCSR_CHNL0_CONFIG_REG,
|
||||
FH_MEM_RSSR_SHARED_CTRL_REG,
|
||||
FH_MEM_RSSR_RX_STATUS_REG,
|
||||
FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
|
||||
FH_TSSR_TX_STATUS_REG,
|
||||
FH_TSSR_TX_ERROR_REG
|
||||
};
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
if (buf) {
|
||||
int pos = 0;
|
||||
size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
|
||||
|
||||
*buf = kmalloc(bufsz, GFP_KERNEL);
|
||||
if (!*buf)
|
||||
return -ENOMEM;
|
||||
|
||||
pos += scnprintf(*buf + pos, bufsz - pos,
|
||||
"FH register values:\n");
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
|
||||
pos += scnprintf(*buf + pos, bufsz - pos,
|
||||
" %34s: 0X%08x\n",
|
||||
get_fh_string(fh_tbl[i]),
|
||||
iwl_read_direct32(trans, fh_tbl[i]));
|
||||
|
||||
return pos;
|
||||
}
|
||||
#endif
|
||||
|
||||
IWL_ERR(trans, "FH register values:\n");
|
||||
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
|
||||
IWL_ERR(trans, " %34s: 0X%08x\n",
|
||||
get_fh_string(fh_tbl[i]),
|
||||
iwl_read_direct32(trans, fh_tbl[i]));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -77,4 +77,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
|
|||
u32 bits, u32 mask);
|
||||
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
|
||||
|
||||
/* Error handling */
|
||||
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -118,6 +118,7 @@ static const u8 iwl_nvm_channels[] = {
|
|||
#define LAST_2GHZ_HT_PLUS 9
|
||||
#define LAST_5GHZ_HT 161
|
||||
|
||||
#define DEFAULT_MAX_TX_POWER 16
|
||||
|
||||
/* rate data (static) */
|
||||
static struct ieee80211_rate iwl_cfg80211_rates[] = {
|
||||
|
@ -232,8 +233,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
|||
|
||||
/* Initialize regulatory-based run-time data */
|
||||
|
||||
/* TODO: read the real value from the NVM */
|
||||
channel->max_power = 0;
|
||||
/*
|
||||
* Default value - highest tx power value. max_power
|
||||
* is not used in mvm, and is used for backwards compatibility
|
||||
*/
|
||||
channel->max_power = DEFAULT_MAX_TX_POWER;
|
||||
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
|
||||
IWL_DEBUG_EEPROM(dev,
|
||||
"Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
|
||||
|
|
|
@ -93,7 +93,7 @@ struct iwl_cfg;
|
|||
* 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
|
||||
* capabilities advertized by the fw file (in TLV format).
|
||||
* 2) The driver layer starts the op_mode (ops->start)
|
||||
* 3) The op_mode registers registers mac80211
|
||||
* 3) The op_mode registers mac80211
|
||||
* 4) The op_mode is governed by mac80211
|
||||
* 5) The driver layer stops the op_mode
|
||||
*/
|
||||
|
@ -112,7 +112,7 @@ struct iwl_cfg;
|
|||
* @stop: stop the op_mode. Must free all the memory allocated.
|
||||
* May sleep
|
||||
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
|
||||
* HCMD the this Rx responds to.
|
||||
* HCMD this Rx responds to.
|
||||
* This callback may sleep, it is called from a threaded IRQ handler.
|
||||
* @queue_full: notifies that a HW queue is full.
|
||||
* Must be atomic and called with BH disabled.
|
||||
|
|
|
@ -180,7 +180,7 @@ struct iwl_rx_packet {
|
|||
* enum CMD_MODE - how to send the host commands ?
|
||||
*
|
||||
* @CMD_SYNC: The caller will be stalled until the fw responds to the command
|
||||
* @CMD_ASYNC: Return right away and don't want for the response
|
||||
* @CMD_ASYNC: Return right away and don't wait for the response
|
||||
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
|
||||
* response. The caller needs to call iwl_free_resp when done.
|
||||
*/
|
||||
|
@ -218,7 +218,7 @@ struct iwl_device_cmd {
|
|||
*
|
||||
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
|
||||
* ring. The transport layer doesn't map the command's buffer to DMA, but
|
||||
* rather copies it to an previously allocated DMA buffer. This flag tells
|
||||
* rather copies it to a previously allocated DMA buffer. This flag tells
|
||||
* the transport layer not to copy the command, but to map the existing
|
||||
* buffer (that is passed in) instead. This saves the memcpy and allows
|
||||
* commands that are bigger than the fixed buffer to be submitted.
|
||||
|
@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag {
|
|||
* @handler_status: return value of the handler of the command
|
||||
* (put in setup_rx_handlers) - valid for SYNC mode only
|
||||
* @flags: can be CMD_*
|
||||
* @len: array of the lenths of the chunks in data
|
||||
* @len: array of the lengths of the chunks in data
|
||||
* @dataflags: IWL_HCMD_DFL_*
|
||||
* @id: id of the host command
|
||||
*/
|
||||
|
@ -396,8 +396,6 @@ struct iwl_trans;
|
|||
* May sleep
|
||||
* @dbgfs_register: add the dbgfs files under this directory. Files will be
|
||||
* automatically deleted.
|
||||
* @suspend: stop the device unless WoWLAN is configured
|
||||
* @resume: resume activity of the device
|
||||
* @write8: write a u8 to a register at offset ofs from the BAR
|
||||
* @write32: write a u32 to a register at offset ofs from the BAR
|
||||
* @read32: read a u32 register at offset ofs from the BAR
|
||||
|
@ -443,10 +441,7 @@ struct iwl_trans_ops {
|
|||
|
||||
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
|
||||
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
int (*suspend)(struct iwl_trans *trans);
|
||||
int (*resume)(struct iwl_trans *trans);
|
||||
#endif
|
||||
|
||||
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
|
||||
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
|
||||
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
|
||||
|
@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
|
|||
return trans->ops->dbgfs_register(trans, dir);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static inline int iwl_trans_suspend(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->suspend(trans);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_resume(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->ops->resume(trans);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
|
||||
{
|
||||
trans->ops->write8(trans, ofs, val);
|
||||
|
|
|
@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
|
|||
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
|
||||
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
|
||||
iwlmvm-y += scan.o time-event.o rs.o
|
||||
iwlmvm-y += power.o bt-coex.o
|
||||
iwlmvm-y += power.o power_legacy.o bt-coex.o
|
||||
iwlmvm-y += led.o tt.o
|
||||
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
|
||||
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
|
||||
|
|
|
@ -220,45 +220,15 @@ static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
|
|||
|
||||
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct iwl_bt_coex_cmd cmd = {
|
||||
.max_kill = 5,
|
||||
.bt3_time_t7_value = 1,
|
||||
.bt3_prio_sample_time = 2,
|
||||
.bt3_timer_t2_value = 0xc,
|
||||
struct iwl_bt_coex_cmd *bt_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = BT_CONFIG,
|
||||
.len = { sizeof(*bt_cmd), },
|
||||
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
|
||||
.flags = CMD_SYNC,
|
||||
};
|
||||
int ret;
|
||||
|
||||
cmd.flags = iwlwifi_mod_params.bt_coex_active ?
|
||||
BT_COEX_NW : BT_COEX_DISABLE;
|
||||
cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
|
||||
|
||||
cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
|
||||
BT_VALID_BT_PRIO_BOOST |
|
||||
BT_VALID_MAX_KILL |
|
||||
BT_VALID_3W_TMRS |
|
||||
BT_VALID_KILL_ACK |
|
||||
BT_VALID_KILL_CTS |
|
||||
BT_VALID_REDUCED_TX_POWER |
|
||||
BT_VALID_LUT);
|
||||
|
||||
if (mvm->cfg->bt_shared_single_ant)
|
||||
memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
|
||||
sizeof(iwl_single_shared_ant_lookup));
|
||||
else if (is_loose_coex())
|
||||
memcpy(&cmd.decision_lut, iwl_loose_lookup,
|
||||
sizeof(iwl_tight_lookup));
|
||||
else
|
||||
memcpy(&cmd.decision_lut, iwl_tight_lookup,
|
||||
sizeof(iwl_tight_lookup));
|
||||
|
||||
cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
|
||||
cmd.kill_ack_msk =
|
||||
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
|
||||
cmd.kill_cts_msk =
|
||||
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
|
||||
|
||||
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
|
||||
|
||||
/* go to CALIB state in internal BT-Coex state machine */
|
||||
ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
|
||||
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
|
||||
|
@ -270,16 +240,67 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
|
||||
if (!bt_cmd)
|
||||
return -ENOMEM;
|
||||
cmd.data[0] = bt_cmd;
|
||||
|
||||
bt_cmd->max_kill = 5;
|
||||
bt_cmd->bt3_time_t7_value = 1;
|
||||
bt_cmd->bt3_prio_sample_time = 2;
|
||||
bt_cmd->bt3_timer_t2_value = 0xc;
|
||||
|
||||
bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
|
||||
BT_COEX_NW : BT_COEX_DISABLE;
|
||||
bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
|
||||
|
||||
bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
|
||||
BT_VALID_BT_PRIO_BOOST |
|
||||
BT_VALID_MAX_KILL |
|
||||
BT_VALID_3W_TMRS |
|
||||
BT_VALID_KILL_ACK |
|
||||
BT_VALID_KILL_CTS |
|
||||
BT_VALID_REDUCED_TX_POWER |
|
||||
BT_VALID_LUT);
|
||||
|
||||
if (mvm->cfg->bt_shared_single_ant)
|
||||
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
|
||||
sizeof(iwl_single_shared_ant_lookup));
|
||||
else if (is_loose_coex())
|
||||
memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
|
||||
sizeof(iwl_tight_lookup));
|
||||
else
|
||||
memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
|
||||
sizeof(iwl_tight_lookup));
|
||||
|
||||
bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
|
||||
bt_cmd->kill_ack_msk =
|
||||
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
|
||||
bt_cmd->kill_cts_msk =
|
||||
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
|
||||
|
||||
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
|
||||
|
||||
ret = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
|
||||
kfree(bt_cmd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
|
||||
bool reduced_tx_power)
|
||||
{
|
||||
enum iwl_bt_kill_msk bt_kill_msk;
|
||||
struct iwl_bt_coex_cmd cmd = {};
|
||||
struct iwl_bt_coex_cmd *bt_cmd;
|
||||
struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = BT_CONFIG,
|
||||
.data[0] = &bt_cmd,
|
||||
.len = { sizeof(*bt_cmd), },
|
||||
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
|
||||
.flags = CMD_SYNC,
|
||||
};
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
|
@ -308,24 +329,40 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
|
|||
return 0;
|
||||
|
||||
mvm->bt_kill_msk = bt_kill_msk;
|
||||
cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
|
||||
cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
|
||||
cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
|
||||
|
||||
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
|
||||
if (!bt_cmd)
|
||||
return -ENOMEM;
|
||||
cmd.data[0] = bt_cmd;
|
||||
|
||||
bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
|
||||
bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
|
||||
bt_cmd->valid_bit_msk =
|
||||
cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
|
||||
|
||||
IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
|
||||
return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
|
||||
ret = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
|
||||
kfree(bt_cmd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
|
||||
bool enable)
|
||||
{
|
||||
struct iwl_bt_coex_cmd cmd = {
|
||||
.valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
|
||||
.bt_reduced_tx_power = sta_id,
|
||||
struct iwl_bt_coex_cmd *bt_cmd;
|
||||
/* Send ASYNC since this can be sent from an atomic context */
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = BT_CONFIG,
|
||||
.len = { sizeof(*bt_cmd), },
|
||||
.dataflags = { IWL_HCMD_DFL_DUP, },
|
||||
.flags = CMD_ASYNC,
|
||||
};
|
||||
|
||||
struct ieee80211_sta *sta;
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
int ret;
|
||||
|
||||
/* This can happen if the station has been removed right now */
|
||||
if (sta_id == IWL_MVM_STATION_COUNT)
|
||||
|
@ -339,17 +376,26 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
|
|||
if (mvmsta->bt_reduced_txpower == enable)
|
||||
return 0;
|
||||
|
||||
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
|
||||
if (!bt_cmd)
|
||||
return -ENOMEM;
|
||||
cmd.data[0] = bt_cmd;
|
||||
|
||||
bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
|
||||
bt_cmd->bt_reduced_tx_power = sta_id;
|
||||
|
||||
if (enable)
|
||||
cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
|
||||
bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
|
||||
|
||||
IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
|
||||
enable ? "en" : "dis", sta_id);
|
||||
|
||||
mvmsta->bt_reduced_txpower = enable;
|
||||
|
||||
/* Send ASYNC since this can be sent from an atomic context */
|
||||
return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
ret = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
|
||||
kfree(bt_cmd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct iwl_bt_iterator_data {
|
||||
|
@ -384,6 +430,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
|
|||
|
||||
smps_mode = IEEE80211_SMPS_AUTOMATIC;
|
||||
|
||||
/* non associated BSSes aren't to be considered */
|
||||
if (!vif->bss_conf.assoc)
|
||||
return;
|
||||
|
||||
if (band != IEEE80211_BAND_2GHZ) {
|
||||
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
|
||||
smps_mode);
|
||||
|
@ -523,6 +573,8 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
|
|||
lockdep_is_held(&mvm->mutex));
|
||||
mvmsta = (void *)sta->drv_priv;
|
||||
|
||||
data->num_bss_ifaces++;
|
||||
|
||||
/*
|
||||
* This interface doesn't support reduced Tx power (because of low
|
||||
* RSSI probably), then set bt_kill_msk to default values.
|
||||
|
@ -588,23 +640,5 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ieee80211_chanctx_conf *chanctx_conf;
|
||||
enum ieee80211_band band;
|
||||
|
||||
rcu_read_lock();
|
||||
chanctx_conf = rcu_dereference(vif->chanctx_conf);
|
||||
if (chanctx_conf && chanctx_conf->def.chan)
|
||||
band = chanctx_conf->def.chan->band;
|
||||
else
|
||||
band = -1;
|
||||
rcu_read_unlock();
|
||||
|
||||
/* if we are in 2GHz we will get a notification from the fw */
|
||||
if (band == IEEE80211_BAND_2GHZ)
|
||||
return;
|
||||
|
||||
/* else, we can remove all the constraints */
|
||||
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
|
||||
|
||||
iwl_mvm_bt_coex_notif_handle(mvm);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,71 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2013 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2013 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
#ifndef __MVM_CONSTANTS_H
#define __MVM_CONSTANTS_H

#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)

#endif /* __MVM_CONSTANTS_H */
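
These are plain microsecond values (USEC_PER_MSEC is 1000, so the defaults are 100 ms and the WoWLAN variants 10 ms). As a minimal illustrative sketch only -- the helper name below is hypothetical, the real consumer is the mvm power code that builds the power table command -- the constants end up in the little-endian timeout fields of that command:

	/* illustration: push the default PS timeouts into a power command */
	static void example_set_ps_timeouts(struct iwl_powertable_cmd *cmd,
					    bool wowlan)
	{
		u32 tx = wowlan ? IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT
				: IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT;
		u32 rx = wowlan ? IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT
				: IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT;

		cmd->tx_data_timeout = cpu_to_le32(tx);	/* usec */
		cmd->rx_data_timeout = cpu_to_le32(rx);	/* usec */
	}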

@ -105,7 +105,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

@ -378,36 +378,68 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif)
{
	struct iwl_proto_offload_cmd cmd = {};
	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
	} cmd = {};
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int i;

	if (mvmvif->num_target_ipv6_addrs) {
		cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
		memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));
	} else {
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));
	}

	BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
		     sizeof(mvmvif->target_ipv6_addrs[i]));

	for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
		memcpy(cmd.target_ipv6_addr[i],
		       &mvmvif->target_ipv6_addrs[i],
		       sizeof(cmd.target_ipv6_addr[i]));
#endif

	if (vif->bss_conf.arp_addr_cnt) {
		cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
		cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
		memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
	} else {
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	}

	if (!cmd.enabled)
	if (vif->bss_conf.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
		common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	}

	if (!enabled)
		return 0;

	common->enabled = cpu_to_le32(enabled);

	return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
				    sizeof(cmd), &cmd);
				    size, &cmd);
}

enum iwl_mvm_tcp_packet_type {

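The important detail in the hunk above is that the command is now a union sized for the larger v2 layout, so the length passed to iwl_mvm_send_cmd_pdu() must be the size of the layout the firmware actually advertises (via IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS), not sizeof(cmd). A stripped-down, self-contained sketch of that pattern (all names here are hypothetical, for illustration only):

	#include <stddef.h>

	struct cmd_v1 { unsigned char target_addr[2][16]; };	/* old layout */
	struct cmd_v2 { unsigned char target_addr[6][16]; };	/* new layout */

	union versioned_cmd {
		struct cmd_v1 v1;
		struct cmd_v2 v2;
	};

	static size_t cmd_payload_size(const union versioned_cmd *cmd,
				       int fw_supports_v2)
	{
		/* sizeof(*cmd) is always the larger v2 size, which would
		 * overrun what a v1-only firmware expects */
		return fw_supports_v2 ? sizeof(cmd->v2) : sizeof(cmd->v1);
	}
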
@ -424,40 +424,11 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
|
|||
struct ieee80211_vif *vif = file->private_data;
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm *mvm = mvmvif->dbgfs_data;
|
||||
struct iwl_powertable_cmd cmd = {};
|
||||
char buf[256];
|
||||
int bufsz = sizeof(buf);
|
||||
int pos = 0;
|
||||
int pos;
|
||||
|
||||
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
|
||||
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
|
||||
(cmd.flags &
|
||||
cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
|
||||
0 : 1);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
|
||||
le32_to_cpu(cmd.skip_dtim_periods));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
|
||||
iwlmvm_mod_params.power_scheme);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
|
||||
le16_to_cpu(cmd.flags));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
|
||||
cmd.keep_alive_seconds);
|
||||
|
||||
if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
|
||||
(cmd.flags &
|
||||
cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
|
||||
1 : 0);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
|
||||
le32_to_cpu(cmd.rx_data_timeout));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
|
||||
le32_to_cpu(cmd.tx_data_timeout));
|
||||
if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"lprx_rssi_threshold = %d\n",
|
||||
le32_to_cpu(cmd.lprx_rssi_threshold));
|
||||
}
|
||||
pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
}
|
||||
|
@ -621,25 +592,160 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
|
|||
}
|
||||
#undef BT_MBOX_PRINT
|
||||
|
||||
#define PRINT_STATS_LE32(_str, _val) \
|
||||
pos += scnprintf(buf + pos, bufsz - pos, \
|
||||
fmt_table, _str, \
|
||||
le32_to_cpu(_val))
|
||||
|
||||
static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
|
||||
char __user *user_buf, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct iwl_mvm *mvm = file->private_data;
|
||||
static const char *fmt_table = "\t%-30s %10u\n";
|
||||
static const char *fmt_header = "%-32s\n";
|
||||
int pos = 0;
|
||||
char *buf;
|
||||
int ret;
|
||||
int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
|
||||
sizeof(struct mvm_statistics_rx_non_phy) * 10 +
|
||||
sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
|
||||
struct mvm_statistics_rx_phy *ofdm;
|
||||
struct mvm_statistics_rx_phy *cck;
|
||||
struct mvm_statistics_rx_non_phy *general;
|
||||
struct mvm_statistics_rx_ht_phy *ht;
|
||||
|
||||
buf = kzalloc(bufsz, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
ofdm = &mvm->rx_stats.ofdm;
|
||||
cck = &mvm->rx_stats.cck;
|
||||
general = &mvm->rx_stats.general;
|
||||
ht = &mvm->rx_stats.ofdm_ht;
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
|
||||
"Statistics_Rx - OFDM");
|
||||
PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
|
||||
PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
|
||||
PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
|
||||
PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
|
||||
PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
|
||||
PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
|
||||
PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
|
||||
PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
|
||||
PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
|
||||
PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
|
||||
PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
|
||||
PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
|
||||
PRINT_STATS_LE32("rxe_frame_lmt_overrun",
|
||||
ofdm->rxe_frame_limit_overrun);
|
||||
PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
|
||||
PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
|
||||
PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
|
||||
PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
|
||||
PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
|
||||
PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
|
||||
PRINT_STATS_LE32("reserved", ofdm->reserved);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
|
||||
"Statistics_Rx - CCK");
|
||||
PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
|
||||
PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
|
||||
PRINT_STATS_LE32("plcp_err", cck->plcp_err);
|
||||
PRINT_STATS_LE32("crc32_err", cck->crc32_err);
|
||||
PRINT_STATS_LE32("overrun_err", cck->overrun_err);
|
||||
PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
|
||||
PRINT_STATS_LE32("crc32_good", cck->crc32_good);
|
||||
PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
|
||||
PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
|
||||
PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
|
||||
PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
|
||||
PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
|
||||
PRINT_STATS_LE32("rxe_frame_lmt_overrun",
|
||||
cck->rxe_frame_limit_overrun);
|
||||
PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
|
||||
PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
|
||||
PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
|
||||
PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
|
||||
PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
|
||||
PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
|
||||
PRINT_STATS_LE32("reserved", cck->reserved);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
|
||||
"Statistics_Rx - GENERAL");
|
||||
PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
|
||||
PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
|
||||
PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
|
||||
PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
|
||||
PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
|
||||
PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
|
||||
PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
|
||||
PRINT_STATS_LE32("adc_rx_saturation_time",
|
||||
general->adc_rx_saturation_time);
|
||||
PRINT_STATS_LE32("ina_detection_search_time",
|
||||
general->ina_detection_search_time);
|
||||
PRINT_STATS_LE32("beacon_silence_rssi_a",
|
||||
general->beacon_silence_rssi_a);
|
||||
PRINT_STATS_LE32("beacon_silence_rssi_b",
|
||||
general->beacon_silence_rssi_b);
|
||||
PRINT_STATS_LE32("beacon_silence_rssi_c",
|
||||
general->beacon_silence_rssi_c);
|
||||
PRINT_STATS_LE32("interference_data_flag",
|
||||
general->interference_data_flag);
|
||||
PRINT_STATS_LE32("channel_load", general->channel_load);
|
||||
PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
|
||||
PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
|
||||
PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
|
||||
PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
|
||||
PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
|
||||
PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
|
||||
PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
|
||||
PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
|
||||
PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
|
||||
"Statistics_Rx - HT");
|
||||
PRINT_STATS_LE32("plcp_err", ht->plcp_err);
|
||||
PRINT_STATS_LE32("overrun_err", ht->overrun_err);
|
||||
PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
|
||||
PRINT_STATS_LE32("crc32_good", ht->crc32_good);
|
||||
PRINT_STATS_LE32("crc32_err", ht->crc32_err);
|
||||
PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
|
||||
PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
|
||||
PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
|
||||
PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
|
||||
PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
kfree(buf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#undef PRINT_STAT_LE32
|
||||
|
||||
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_mvm *mvm = file->private_data;
|
||||
bool restart_fw = iwlwifi_mod_params.restart_fw;
|
||||
int ret;
|
||||
|
||||
iwlwifi_mod_params.restart_fw = true;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
/* allow one more restart that we're provoking here */
|
||||
if (mvm->restart_fw >= 0)
|
||||
mvm->restart_fw++;
|
||||
|
||||
/* take the return value to make compiler happy - it will fail anyway */
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
iwlwifi_mod_params.restart_fw = restart_fw;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -661,8 +767,14 @@ static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
|
|||
case MVM_DEBUGFS_BF_ROAMING_STATE:
|
||||
dbgfs_bf->bf_roaming_state = value;
|
||||
break;
|
||||
case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
|
||||
dbgfs_bf->bf_temperature_delta = value;
|
||||
case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
|
||||
dbgfs_bf->bf_temp_threshold = value;
|
||||
break;
|
||||
case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
|
||||
dbgfs_bf->bf_temp_fast_filter = value;
|
||||
break;
|
||||
case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
|
||||
dbgfs_bf->bf_temp_slow_filter = value;
|
||||
break;
|
||||
case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
|
||||
dbgfs_bf->bf_enable_beacon_filter = value;
|
||||
|
@ -721,13 +833,27 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
|
|||
value > IWL_BF_ROAMING_STATE_MAX)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_BF_ROAMING_STATE;
|
||||
} else if (!strncmp("bf_temperature_delta=", buf, 21)) {
|
||||
if (sscanf(buf+21, "%d", &value) != 1)
|
||||
} else if (!strncmp("bf_temp_threshold=", buf, 18)) {
|
||||
if (sscanf(buf+18, "%d", &value) != 1)
|
||||
return -EINVAL;
|
||||
if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
|
||||
value > IWL_BF_TEMPERATURE_DELTA_MAX)
|
||||
if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
|
||||
value > IWL_BF_TEMP_THRESHOLD_MAX)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
|
||||
param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
|
||||
} else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
|
||||
if (sscanf(buf+20, "%d", &value) != 1)
|
||||
return -EINVAL;
|
||||
if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
|
||||
value > IWL_BF_TEMP_FAST_FILTER_MAX)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
|
||||
} else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
|
||||
if (sscanf(buf+20, "%d", &value) != 1)
|
||||
return -EINVAL;
|
||||
if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
|
||||
value > IWL_BF_TEMP_SLOW_FILTER_MAX)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
|
||||
} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
|
||||
if (sscanf(buf+24, "%d", &value) != 1)
|
||||
return -EINVAL;
|
||||
|
@ -789,41 +915,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
|
|||
int pos = 0;
|
||||
const size_t bufsz = sizeof(buf);
|
||||
struct iwl_beacon_filter_cmd cmd = {
|
||||
.bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
|
||||
.bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
|
||||
.bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
|
||||
.bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
|
||||
.bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
|
||||
.bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
|
||||
.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
|
||||
.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
|
||||
.ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
|
||||
IWL_BF_CMD_CONFIG_DEFAULTS,
|
||||
.bf_enable_beacon_filter =
|
||||
cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
|
||||
.ba_enable_beacon_abort =
|
||||
cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
|
||||
};
|
||||
|
||||
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
|
||||
if (mvmvif->bf_enabled)
|
||||
cmd.bf_enable_beacon_filter = 1;
|
||||
cmd.bf_enable_beacon_filter = cpu_to_le32(1);
|
||||
else
|
||||
cmd.bf_enable_beacon_filter = 0;
|
||||
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
|
||||
cmd.bf_energy_delta);
|
||||
le32_to_cpu(cmd.bf_energy_delta));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
|
||||
cmd.bf_roaming_energy_delta);
|
||||
le32_to_cpu(cmd.bf_roaming_energy_delta));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
|
||||
cmd.bf_roaming_state);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
|
||||
cmd.bf_temperature_delta);
|
||||
le32_to_cpu(cmd.bf_roaming_state));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
|
||||
le32_to_cpu(cmd.bf_temp_threshold));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
|
||||
le32_to_cpu(cmd.bf_temp_fast_filter));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
|
||||
le32_to_cpu(cmd.bf_temp_slow_filter));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
|
||||
cmd.bf_enable_beacon_filter);
|
||||
le32_to_cpu(cmd.bf_enable_beacon_filter));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
|
||||
cmd.bf_debug_flag);
|
||||
le32_to_cpu(cmd.bf_debug_flag));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
|
||||
cmd.bf_escape_timer);
|
||||
le32_to_cpu(cmd.bf_escape_timer));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
|
||||
cmd.ba_escape_timer);
|
||||
le32_to_cpu(cmd.ba_escape_timer));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
|
||||
cmd.ba_enable_beacon_abort);
|
||||
le32_to_cpu(cmd.ba_enable_beacon_abort));
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
}
|
||||
|
@ -934,6 +1060,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations);
|
|||
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
|
||||
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
|
||||
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
|
||||
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
|
||||
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
|
||||
|
@ -957,6 +1084,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
|||
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
|
||||
|
|
|
@ -98,34 +98,63 @@ enum iwl_proto_offloads {
|
|||
IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
|
||||
};
|
||||
|
||||
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2
|
||||
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
|
||||
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
|
||||
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
|
||||
|
||||
/**
|
||||
* struct iwl_proto_offload_cmd - ARP/NS offload configuration
|
||||
* struct iwl_proto_offload_cmd_common - ARP/NS offload common part
|
||||
* @enabled: enable flags
|
||||
* @remote_ipv4_addr: remote address to answer to (or zero if all)
|
||||
* @host_ipv4_addr: our IPv4 address to respond to queries for
|
||||
* @arp_mac_addr: our MAC address for ARP responses
|
||||
* @reserved: unused
|
||||
*/
|
||||
struct iwl_proto_offload_cmd_common {
|
||||
__le32 enabled;
|
||||
__be32 remote_ipv4_addr;
|
||||
__be32 host_ipv4_addr;
|
||||
u8 arp_mac_addr[ETH_ALEN];
|
||||
__le16 reserved;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
|
||||
* @common: common/IPv4 configuration
|
||||
* @remote_ipv6_addr: remote address to answer to (or zero if all)
|
||||
* @solicited_node_ipv6_addr: broken -- solicited node address exists
|
||||
* for each target address
|
||||
* @target_ipv6_addr: our target addresses
|
||||
* @ndp_mac_addr: neighbor solicitation response MAC address
|
||||
*/
|
||||
struct iwl_proto_offload_cmd {
|
||||
__le32 enabled;
|
||||
__be32 remote_ipv4_addr;
|
||||
__be32 host_ipv4_addr;
|
||||
u8 arp_mac_addr[ETH_ALEN];
|
||||
__le16 reserved1;
|
||||
|
||||
struct iwl_proto_offload_cmd_v1 {
|
||||
struct iwl_proto_offload_cmd_common common;
|
||||
u8 remote_ipv6_addr[16];
|
||||
u8 solicited_node_ipv6_addr[16];
|
||||
u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
|
||||
u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
|
||||
u8 ndp_mac_addr[ETH_ALEN];
|
||||
__le16 reserved2;
|
||||
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
|
||||
* @common: common/IPv4 configuration
|
||||
* @remote_ipv6_addr: remote address to answer to (or zero if all)
|
||||
* @solicited_node_ipv6_addr: broken -- solicited node address exists
|
||||
* for each target address
|
||||
* @target_ipv6_addr: our target addresses
|
||||
* @ndp_mac_addr: neighbor solicitation response MAC address
|
||||
*/
|
||||
struct iwl_proto_offload_cmd_v2 {
|
||||
struct iwl_proto_offload_cmd_common common;
|
||||
u8 remote_ipv6_addr[16];
|
||||
u8 solicited_node_ipv6_addr[16];
|
||||
u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
|
||||
u8 ndp_mac_addr[ETH_ALEN];
|
||||
u8 numValidIPv6Addresses;
|
||||
u8 reserved2[3];
|
||||
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
|
||||
|
||||
|
||||
/*
|
||||
* WOWLAN_PATTERNS
|
||||
|
|
|
@ -79,6 +79,10 @@
|
|||
* '1' Driver enables PM (use rest of parameters)
|
||||
* @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
|
||||
* '1' PM could sleep over DTIM till listen Interval.
|
||||
* @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
|
||||
* access categories are both delivery and trigger enabled.
|
||||
* @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
|
||||
* PBW Snoozing enabled
|
||||
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
|
||||
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
|
||||
*/
|
||||
|
@ -86,6 +90,8 @@ enum iwl_power_flags {
|
|||
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
|
||||
POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
|
||||
POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
|
||||
POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
|
||||
POWER_FLAGS_BT_SCO_ENA = BIT(8),
|
||||
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
|
||||
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
|
||||
};
|
||||
|
@ -93,7 +99,8 @@ enum iwl_power_flags {
|
|||
#define IWL_POWER_VEC_SIZE 5
|
||||
|
||||
/**
|
||||
* struct iwl_powertable_cmd - Power Table Command
|
||||
* struct iwl_powertable_cmd - legacy power command. Beside old API support this
|
||||
* is used also with a new power API for device wide power settings.
|
||||
* POWER_TABLE_CMD = 0x77 (command, has simple generic response)
|
||||
*
|
||||
* @flags: Power table command flags from POWER_FLAGS_*
|
||||
|
@ -124,6 +131,72 @@ struct iwl_powertable_cmd {
|
|||
__le32 lprx_rssi_threshold;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_mac_power_cmd - New power command containing uAPSD support
|
||||
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
|
||||
* @id_and_color: MAC context identifier
|
||||
* @flags: Power table command flags from POWER_FLAGS_*
|
||||
* @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
|
||||
* Minimum allowed: 3 * DTIM. Keep alive period must be
|
||||
* set regardless of power scheme or current power state.
|
||||
* FW uses this value also when PM is disabled.
|
||||
* @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
|
||||
* PSM transition - legacy PM
|
||||
* @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
|
||||
* PSM transition - legacy PM
|
||||
* @sleep_interval: not in use
|
||||
* @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
|
||||
* is set. For example, if it is required to skip over
|
||||
* one DTIM, this value needs to be set to 2 (DTIM periods).
|
||||
* @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
|
||||
* PSM transition - uAPSD
|
||||
* @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
|
||||
* PSM transition - uAPSD
|
||||
* @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
|
||||
* Default: 80 dBm
|
||||
* @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
|
||||
* @snooze_interval: TBD
|
||||
* @snooze_window: TBD
|
||||
* @snooze_step: TBD
|
||||
* @qndp_tid: TID client shall use for uAPSD QNDP triggers
|
||||
* @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
|
||||
* each corresponding AC.
|
||||
* Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
|
||||
* @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
|
||||
* values.
|
||||
* @heavy_traffic_thr_tx_pkts: TX threshold measured in number of packets
|
||||
* @heavy_traffic_thr_rx_pkts: RX threshold measured in number of packets
|
||||
* @heavy_traffic_thr_tx_load: TX threshold measured in load's percentage
|
||||
* @heavy_traffic_thr_rx_load: RX threshold measured in load's percentage
|
||||
* @limited_ps_threshold:
|
||||
*/
|
||||
struct iwl_mac_power_cmd {
|
||||
/* CONTEXT_DESC_API_T_VER_1 */
|
||||
__le32 id_and_color;
|
||||
|
||||
/* CLIENT_PM_POWER_TABLE_S_VER_1 */
|
||||
__le16 flags;
|
||||
__le16 keep_alive_seconds;
|
||||
__le32 rx_data_timeout;
|
||||
__le32 tx_data_timeout;
|
||||
__le32 rx_data_timeout_uapsd;
|
||||
__le32 tx_data_timeout_uapsd;
|
||||
u8 lprx_rssi_threshold;
|
||||
u8 skip_dtim_periods;
|
||||
__le16 snooze_interval;
|
||||
__le16 snooze_window;
|
||||
u8 snooze_step;
|
||||
u8 qndp_tid;
|
||||
u8 uapsd_ac_flags;
|
||||
u8 uapsd_max_sp;
|
||||
u8 heavy_traffic_threshold_tx_packets;
|
||||
u8 heavy_traffic_threshold_rx_packets;
|
||||
u8 heavy_traffic_threshold_tx_percentage;
|
||||
u8 heavy_traffic_threshold_rx_percentage;
|
||||
u8 limited_ps_threshold;
|
||||
u8 reserved;
|
||||
} __packed;
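
The kerneldoc above describes the new per-MAC power command. As a rough illustration only (the helper name and the chosen values are hypothetical, not part of this change; the real builder is iwl_mvm_power_build_cmd() in mvm/power.c), a caller fills the little-endian fields and flag bits along these lines:

	/* hypothetical sketch of filling the new per-MAC power command */
	static void example_fill_mac_power_cmd(struct iwl_mvm_vif *mvmvif,
					       struct iwl_mac_power_cmd *cmd)
	{
		/* bind the command to this MAC context */
		cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								    mvmvif->color));
		/* enable power save and allow skipping over DTIM */
		cmd->flags = cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK |
					 POWER_FLAGS_SKIP_OVER_DTIM_MSK);
		cmd->keep_alive_seconds = cpu_to_le16(POWER_KEEP_ALIVE_PERIOD_SEC);
		cmd->skip_dtim_periods = 2;	/* skip over one DTIM, see kerneldoc */
		cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
		cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
	}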
|
||||
|
||||
/**
|
||||
* struct iwl_beacon_filter_cmd
|
||||
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
|
||||
|
@ -143,11 +216,21 @@ struct iwl_powertable_cmd {
|
|||
* calculated for current beacon is less than the threshold, use
|
||||
* Roaming Energy Delta Threshold, otherwise use normal Energy Delta
|
||||
* Threshold. Typical energy threshold is -72dBm.
|
||||
* @bf_temperature_delta: Send Beacon to driver if delta in temperature values
|
||||
* calculated for this and the last passed beacon is greater than this
|
||||
* threshold. Zero value means that the temperature change is ignored for
|
||||
* @bf_temp_threshold: This threshold determines the type of temperature
|
||||
* filtering (Slow or Fast) that is selected (Units are in Celsius):
|
||||
* If the current temperature is above this threshold - Fast filter
|
||||
* will be used, If the current temperature is below this threshold -
|
||||
* Slow filter will be used.
|
||||
* @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
|
||||
* calculated for this and the last passed beacon is greater than this
|
||||
* threshold. Zero value means that the temperature change is ignored for
|
||||
* beacon filtering; beacons will not be forced to be sent to driver
|
||||
* regardless of whether its temperature has been changed.
|
||||
* @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
|
||||
* calculated for this and the last passed beacon is greater than this
|
||||
* threshold. Zero value means that the temperature change is ignored for
|
||||
* beacon filtering; beacons will not be forced to be sent to driver
|
||||
* regardless of whether its temperature has been changed.
|
||||
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
|
||||
* @bf_filter_escape_timer: Send beacons to driver if no beacons were passed
|
||||
* for a specific period of time. Units: Beacons.
|
||||
|
@ -156,17 +239,17 @@ struct iwl_powertable_cmd {
|
|||
* @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
|
||||
*/
|
||||
struct iwl_beacon_filter_cmd {
|
||||
u8 bf_energy_delta;
|
||||
u8 bf_roaming_energy_delta;
|
||||
u8 bf_roaming_state;
|
||||
u8 bf_temperature_delta;
|
||||
u8 bf_enable_beacon_filter;
|
||||
u8 bf_debug_flag;
|
||||
__le16 reserved1;
|
||||
__le32 bf_energy_delta;
|
||||
__le32 bf_roaming_energy_delta;
|
||||
__le32 bf_roaming_state;
|
||||
__le32 bf_temp_threshold;
|
||||
__le32 bf_temp_fast_filter;
|
||||
__le32 bf_temp_slow_filter;
|
||||
__le32 bf_enable_beacon_filter;
|
||||
__le32 bf_debug_flag;
|
||||
__le32 bf_escape_timer;
|
||||
__le32 ba_escape_timer;
|
||||
u8 ba_enable_beacon_abort;
|
||||
u8 reserved2[3];
|
||||
__le32 ba_enable_beacon_abort;
|
||||
} __packed;
|
||||
|
||||
/* Beacon filtering and beacon abort */
|
||||
|
@ -182,9 +265,17 @@ struct iwl_beacon_filter_cmd {
|
|||
#define IWL_BF_ROAMING_STATE_MAX 255
|
||||
#define IWL_BF_ROAMING_STATE_MIN 0
|
||||
|
||||
#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
|
||||
#define IWL_BF_TEMPERATURE_DELTA_MAX 255
|
||||
#define IWL_BF_TEMPERATURE_DELTA_MIN 0
|
||||
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
|
||||
#define IWL_BF_TEMP_THRESHOLD_MAX 255
|
||||
#define IWL_BF_TEMP_THRESHOLD_MIN 0
|
||||
|
||||
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
|
||||
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
|
||||
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
|
||||
|
||||
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
|
||||
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
|
||||
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
|
||||
|
||||
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
|
||||
|
||||
|
@ -194,19 +285,23 @@ struct iwl_beacon_filter_cmd {
|
|||
#define IWL_BF_ESCAPE_TIMER_MAX 1024
|
||||
#define IWL_BF_ESCAPE_TIMER_MIN 0
|
||||
|
||||
#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
|
||||
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
|
||||
#define IWL_BA_ESCAPE_TIMER_D3 6
|
||||
#define IWL_BA_ESCAPE_TIMER_MAX 1024
|
||||
#define IWL_BA_ESCAPE_TIMER_MIN 0
|
||||
|
||||
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
|
||||
|
||||
#define IWL_BF_CMD_CONFIG_DEFAULTS \
|
||||
.bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \
|
||||
.bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
|
||||
.bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \
|
||||
.bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \
|
||||
.bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \
|
||||
.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
|
||||
#define IWL_BF_CMD_CONFIG_DEFAULTS \
|
||||
.bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
|
||||
.bf_roaming_energy_delta = \
|
||||
cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
|
||||
.bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
|
||||
.bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
|
||||
.bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
|
||||
.bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
|
||||
.bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
|
||||
.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
|
||||
.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
|
||||
|
||||
#endif
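
The rewritten IWL_BF_CMD_CONFIG_DEFAULTS macro expands to little-endian designated initializers, so callers start from the defaults and override individual fields; the beacon-abort path in power.c uses exactly this shape. The snippet below is only a minimal illustration of that pattern, not new driver code:

	struct iwl_beacon_filter_cmd cmd = {
		IWL_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = cpu_to_le32(1),
		.ba_enable_beacon_abort = cpu_to_le32(1),
	};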
|
||||
|
|
|
@ -137,6 +137,8 @@ struct iwl_ssid_ie {
|
|||
*@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
|
||||
*@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
|
||||
*@SCAN_FLAGS_FRAGMENTED_SCAN:
|
||||
*@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that were active
|
||||
* in the past hour, even if they are marked as passive.
|
||||
*/
|
||||
enum iwl_scan_flags {
|
||||
SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
|
||||
|
@ -144,6 +146,7 @@ enum iwl_scan_flags {
|
|||
SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
|
||||
SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
|
||||
SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
|
||||
SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5),
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -178,7 +181,7 @@ enum iwl_scan_type {
|
|||
* @quiet_time: in msecs, dwell this time for active scan on quiet channels
|
||||
* @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
|
||||
* this number of packets were received (typically 1)
|
||||
* @passive2active: is auto switching from passive to active allowed (0 or 1)
|
||||
* @passive2active: is auto switching from passive to active during scan allowed
|
||||
* @rxchain_sel_flags: RXON_RX_CHAIN_*
|
||||
* @max_out_time: in usecs, max out of serving channel time
|
||||
* @suspend_time: how long to pause scan when returning to service channel:
|
||||
|
|
|
@ -91,7 +91,6 @@
|
|||
* @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
|
||||
* @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
|
||||
* @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
|
||||
* @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
|
||||
* @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
|
||||
* @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
|
||||
* @TX_CMD_FLG_EXEC_PAPD: execute PAPD
|
||||
|
@ -120,7 +119,6 @@ enum iwl_tx_flags {
|
|||
TX_CMD_FLG_RESP_TO_DRV = BIT(21),
|
||||
TX_CMD_FLG_CCMP_AGG = BIT(22),
|
||||
TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
|
||||
TX_CMD_FLG_CTS_ONLY = BIT(24),
|
||||
TX_CMD_FLG_DUR = BIT(25),
|
||||
TX_CMD_FLG_FW_DROP = BIT(26),
|
||||
TX_CMD_FLG_EXEC_PAPD = BIT(27),
|
||||
|
|
|
@ -136,7 +136,7 @@ enum {
|
|||
CALIB_RES_NOTIF_PHY_DB = 0x6b,
|
||||
/* PHY_DB_CMD = 0x6c, */
|
||||
|
||||
/* Power */
|
||||
/* Power - legacy power table command */
|
||||
POWER_TABLE_CMD = 0x77,
|
||||
|
||||
/* Thermal Throttling*/
|
||||
|
@ -159,6 +159,7 @@ enum {
|
|||
TX_ANT_CONFIGURATION_CMD = 0x98,
|
||||
BT_CONFIG = 0x9b,
|
||||
STATISTICS_NOTIFICATION = 0x9d,
|
||||
REDUCE_TX_POWER_CMD = 0x9f,
|
||||
|
||||
/* RF-KILL commands and notifications */
|
||||
CARD_STATE_CMD = 0xa0,
|
||||
|
@ -166,6 +167,9 @@ enum {
|
|||
|
||||
MISSED_BEACONS_NOTIFICATION = 0xa2,
|
||||
|
||||
/* Power - new power table command */
|
||||
MAC_PM_POWER_TABLE = 0xa9,
|
||||
|
||||
REPLY_RX_PHY_CMD = 0xc0,
|
||||
REPLY_RX_MPDU_CMD = 0xc1,
|
||||
BA_NOTIF = 0xc5,
|
||||
|
@ -223,6 +227,19 @@ struct iwl_tx_ant_cfg_cmd {
|
|||
__le32 valid;
|
||||
} __packed;
|
||||
|
||||
/**
 * struct iwl_reduce_tx_power_cmd - TX power reduction command
 * REDUCE_TX_POWER_CMD = 0x9f
 * @flags: (reserved for future implementation)
 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
 * @pwr_restriction: TX power restriction in dBm.
 */
struct iwl_reduce_tx_power_cmd {
	u8 flags;
	u8 mac_context_id;
	__le16 pwr_restriction;
} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
|
||||
/*
|
||||
* Calibration control struct.
|
||||
* Sent as part of the phy configuration command.
|
||||
|
@ -765,6 +782,14 @@ struct iwl_phy_context_cmd {
|
|||
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
|
||||
|
||||
#define IWL_RX_INFO_PHY_CNT 8
|
||||
#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
|
||||
#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
|
||||
#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
|
||||
#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
|
||||
#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
|
||||
#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
|
||||
#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
|
||||
|
||||
#define IWL_RX_INFO_AGC_IDX 1
|
||||
#define IWL_RX_INFO_RSSI_AB_IDX 2
|
||||
#define IWL_OFDM_AGC_A_MSK 0x0000007f
|
||||
|
|
|
@ -78,22 +78,6 @@
|
|||
|
||||
#define UCODE_VALID_OK cpu_to_le32(0x1)
|
||||
|
||||
/* Default calibration values for WkP - set to INIT image w/o running */
|
||||
static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
|
||||
static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
|
||||
|
||||
struct iwl_calib_default_data {
|
||||
u16 size;
|
||||
void *data;
|
||||
};
|
||||
|
||||
#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
|
||||
|
||||
static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
|
||||
[9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
|
||||
[11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
|
||||
};
|
||||
|
||||
struct iwl_mvm_alive_data {
|
||||
bool valid;
|
||||
u32 scd_base_addr;
|
||||
|
@ -248,40 +232,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
|
|||
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
|
||||
}
|
||||
|
||||
static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
|
||||
{
|
||||
u8 cmd_raw[16]; /* holds the variable size commands */
|
||||
struct iwl_set_calib_default_cmd *cmd =
|
||||
(struct iwl_set_calib_default_cmd *)cmd_raw;
|
||||
int ret, i;
|
||||
|
||||
/* Setting default values for calibrations we don't run */
|
||||
for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
|
||||
u16 cmd_len;
|
||||
|
||||
if (wkp_calib_default_data[i].size == 0)
|
||||
continue;
|
||||
|
||||
memset(cmd_raw, 0, sizeof(cmd_raw));
|
||||
cmd_len = wkp_calib_default_data[i].size + sizeof(cmd);
|
||||
cmd->calib_index = cpu_to_le16(i);
|
||||
cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
|
||||
if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
|
||||
"Need to enlarge cmd_raw to %d\n", cmd_len))
|
||||
break;
|
||||
memcpy(cmd->data, wkp_calib_default_data[i].data,
|
||||
wkp_calib_default_data[i].size);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
|
||||
sizeof(*cmd) +
|
||||
wkp_calib_default_data[i].size,
|
||||
cmd);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
||||
{
|
||||
struct iwl_notification_wait calib_wait;
|
||||
|
@ -342,11 +292,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
|||
if (ret)
|
||||
goto error;
|
||||
|
||||
/* need to set default values */
|
||||
ret = iwl_set_default_calibrations(mvm);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
/*
|
||||
* Send phy configurations command to init uCode
|
||||
* to start the 16.0 uCode init image internal calibrations.
|
||||
|
|
|
@ -264,7 +264,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
|
|||
return 0;
|
||||
|
||||
/* Therefore, in recovery, we can't get here */
|
||||
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
|
||||
if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
|
||||
return -EBUSY;
|
||||
|
||||
mvmvif->id = find_first_bit(data.available_mac_ids,
|
||||
NUM_MAC_INDEX_DRIVER);
|
||||
|
|
|
@ -153,7 +153,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
|||
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
|
||||
IEEE80211_HW_AMPDU_AGGREGATION |
|
||||
IEEE80211_HW_TIMING_BEACON_ONLY |
|
||||
IEEE80211_HW_CONNECTION_MONITOR;
|
||||
IEEE80211_HW_CONNECTION_MONITOR |
|
||||
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
|
||||
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
|
||||
|
||||
hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
|
||||
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
|
||||
|
@ -506,7 +508,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
|||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
/* Allocate resources for the MAC context, and add it the the fw */
|
||||
/* Allocate resources for the MAC context, and add it to the fw */
|
||||
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
@ -552,6 +554,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
|||
goto out_release;
|
||||
}
|
||||
|
||||
iwl_mvm_vif_dbgfs_register(mvm, vif);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
|
@ -566,16 +569,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
|||
iwl_mvm_power_update_mode(mvm, vif);
|
||||
|
||||
/* beacon filtering */
|
||||
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
|
||||
if (ret)
|
||||
goto out_remove_mac;
|
||||
|
||||
if (!mvm->bf_allowed_vif &&
|
||||
vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
|
||||
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
|
||||
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
|
||||
mvm->bf_allowed_vif = mvmvif;
|
||||
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
|
||||
}
|
||||
|
||||
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
|
||||
if (ret)
|
||||
goto out_release;
|
||||
|
||||
/*
|
||||
* P2P_DEVICE interface does not have a channel context assigned to it,
|
||||
* so a dedicated PHY context is allocated to it and the corresponding
|
||||
|
@ -586,7 +590,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
|||
mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
|
||||
if (!mvmvif->phy_ctxt) {
|
||||
ret = -ENOSPC;
|
||||
goto out_remove_mac;
|
||||
goto out_free_bf;
|
||||
}
|
||||
|
||||
iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
|
||||
|
@ -610,6 +614,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
|
|||
iwl_mvm_binding_remove_vif(mvm, vif);
|
||||
out_unref_phy:
|
||||
iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
|
||||
out_free_bf:
|
||||
if (mvm->bf_allowed_vif == mvmvif) {
|
||||
mvm->bf_allowed_vif = NULL;
|
||||
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
|
||||
}
|
||||
out_remove_mac:
|
||||
mvmvif->phy_ctxt = NULL;
|
||||
iwl_mvm_mac_ctxt_remove(mvm, vif);
|
||||
|
@ -719,6 +728,20 @@ out_release:
|
|||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
|
||||
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				s8 tx_power)
{
	/* FW is in charge of regulatory enforcement */
	struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
		.mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
		.pwr_restriction = cpu_to_le16(tx_power),
	};

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
				    sizeof(reduce_txpwr_cmd),
				    &reduce_txpwr_cmd);
}
|
||||
static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
|
||||
{
|
||||
return 0;
|
||||
|
@ -766,7 +789,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
|
|||
IWL_ERR(mvm, "failed to update quotas\n");
|
||||
return;
|
||||
}
|
||||
iwl_mvm_bt_coex_vif_assoc(mvm, vif);
|
||||
iwl_mvm_configure_mcast_filter(mvm, vif);
|
||||
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
|
||||
/* remove AP station now that the MAC is unassoc */
|
||||
|
@ -779,9 +801,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
|
|||
if (ret)
|
||||
IWL_ERR(mvm, "failed to update quotas\n");
|
||||
}
|
||||
ret = iwl_mvm_power_update_mode(mvm, vif);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "failed to update power mode\n");
|
||||
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
|
||||
/* Workaround for FW bug, otherwise FW disables device
|
||||
* power save upon disassociation
|
||||
*/
|
||||
ret = iwl_mvm_power_update_mode(mvm, vif);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "failed to update power mode\n");
|
||||
}
|
||||
iwl_mvm_bt_coex_vif_assoc(mvm, vif);
|
||||
} else if (changes & BSS_CHANGED_BEACON_INFO) {
|
||||
/*
|
||||
* We received a beacon _after_ association so
|
||||
|
@ -794,6 +822,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
|
|||
if (ret)
|
||||
IWL_ERR(mvm, "failed to update power mode\n");
|
||||
}
|
||||
if (changes & BSS_CHANGED_TXPOWER) {
|
||||
IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
|
||||
bss_conf->txpower);
|
||||
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
|
||||
}
|
||||
}
|
||||
|
||||
static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
|
|
|
@ -76,6 +76,7 @@
|
|||
#include "iwl-trans.h"
|
||||
#include "sta.h"
|
||||
#include "fw-api.h"
|
||||
#include "constants.h"
|
||||
|
||||
#define IWL_INVALID_MAC80211_QUEUE 0xff
|
||||
#define IWL_MVM_MAX_ADDRESSES 5
|
||||
|
@ -91,6 +92,9 @@ enum iwl_mvm_tx_fifo {
|
|||
};
|
||||
|
||||
extern struct ieee80211_ops iwl_mvm_hw_ops;
|
||||
extern const struct iwl_mvm_power_ops pm_legacy_ops;
|
||||
extern const struct iwl_mvm_power_ops pm_mac_ops;
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_mod_params - module parameters for iwlmvm
|
||||
* @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
|
||||
|
@ -150,6 +154,17 @@ enum iwl_power_scheme {
|
|||
|
||||
#define IWL_CONN_MAX_LISTEN_INTERVAL 70
|
||||
|
||||
struct iwl_mvm_power_ops {
|
||||
int (*power_update_mode)(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif);
|
||||
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
char *buf, int bufsz);
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
enum iwl_dbgfs_pm_mask {
|
||||
MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
|
||||
|
@ -163,7 +178,7 @@ enum iwl_dbgfs_pm_mask {
|
|||
};
|
||||
|
||||
struct iwl_dbgfs_pm {
|
||||
u8 keep_alive_seconds;
|
||||
u16 keep_alive_seconds;
|
||||
u32 rx_data_timeout;
|
||||
u32 tx_data_timeout;
|
||||
bool skip_over_dtim;
|
||||
|
@ -180,24 +195,28 @@ enum iwl_dbgfs_bf_mask {
|
|||
MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
|
||||
MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
|
||||
MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
|
||||
MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
|
||||
MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
|
||||
MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
|
||||
MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
|
||||
MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
|
||||
MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
|
||||
MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
|
||||
MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
|
||||
MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
|
||||
MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
|
||||
MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
|
||||
MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
|
||||
MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
|
||||
MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
|
||||
};
|
||||
|
||||
struct iwl_dbgfs_bf {
|
||||
u8 bf_energy_delta;
|
||||
u8 bf_roaming_energy_delta;
|
||||
u8 bf_roaming_state;
|
||||
u8 bf_temperature_delta;
|
||||
u8 bf_enable_beacon_filter;
|
||||
u8 bf_debug_flag;
|
||||
u32 bf_energy_delta;
|
||||
u32 bf_roaming_energy_delta;
|
||||
u32 bf_roaming_state;
|
||||
u32 bf_temp_threshold;
|
||||
u32 bf_temp_fast_filter;
|
||||
u32 bf_temp_slow_filter;
|
||||
u32 bf_enable_beacon_filter;
|
||||
u32 bf_debug_flag;
|
||||
u32 bf_escape_timer;
|
||||
u32 ba_escape_timer;
|
||||
u8 ba_enable_beacon_abort;
|
||||
u32 ba_enable_beacon_abort;
|
||||
int mask;
|
||||
};
|
||||
#endif
|
||||
|
@ -268,7 +287,7 @@ struct iwl_mvm_vif {
|
|||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
/* IPv6 addresses for WoWLAN */
|
||||
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
|
||||
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
|
||||
int num_target_ipv6_addrs;
|
||||
#endif
|
||||
#endif
|
||||
|
@ -402,6 +421,8 @@ struct iwl_mvm {
|
|||
|
||||
struct iwl_notif_wait_data notif_wait;
|
||||
|
||||
struct mvm_statistics_rx rx_stats;
|
||||
|
||||
unsigned long transport_queue_stop;
|
||||
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
|
||||
atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
|
||||
|
@ -459,6 +480,9 @@ struct iwl_mvm {
|
|||
*/
|
||||
u8 vif_count;
|
||||
|
||||
/* -1 for always, 0 for never, >0 for that many times */
|
||||
s8 restart_fw;
|
||||
|
||||
struct led_classdev led;
|
||||
|
||||
struct ieee80211_vif *p2p_device_vif;
|
||||
|
@ -482,6 +506,8 @@ struct iwl_mvm {
|
|||
/* Thermal Throttling and CTkill */
|
||||
struct iwl_mvm_tt_mgmt thermal_throttle;
|
||||
s32 temperature; /* Celsius */
|
||||
|
||||
const struct iwl_mvm_power_ops *pm_ops;
|
||||
};
|
||||
|
||||
/* Extract MVM priv from op_mode and _hw */
|
||||
|
@ -525,6 +551,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
|
|||
enum ieee80211_band band);
|
||||
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
|
||||
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
|
||||
u8 first_antenna(u8 mask);
|
||||
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
|
||||
|
||||
|
@ -660,10 +687,26 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
|
|||
u8 flags, bool init);
|
||||
|
||||
/* power management */
|
||||
int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct iwl_powertable_cmd *cmd);
|
||||
static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
return mvm->pm_ops->power_update_mode(mvm, vif);
|
||||
}
|
||||
|
||||
static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
return mvm->pm_ops->power_disable(mvm, vif);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
char *buf, int bufsz)
|
||||
{
|
||||
return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
|
||||
}
|
||||
#endif
|
||||
|
||||
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
|
||||
|
@ -707,6 +750,10 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
|
|||
struct ieee80211_vif *vif);
|
||||
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif);
|
||||
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
|
||||
struct iwl_beacon_filter_cmd *cmd);
|
||||
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif, bool enable);
|
||||
|
||||
/* SMPS */
|
||||
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
|
|
|
@ -275,6 +275,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
|
|||
CMD(BEACON_NOTIFICATION),
|
||||
CMD(BEACON_TEMPLATE_CMD),
|
||||
CMD(STATISTICS_NOTIFICATION),
|
||||
CMD(REDUCE_TX_POWER_CMD),
|
||||
CMD(TX_ANT_CONFIGURATION_CMD),
|
||||
CMD(D3_CONFIG_CMD),
|
||||
CMD(PROT_OFFLOAD_CONFIG_CMD),
|
||||
|
@ -301,6 +302,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
|
|||
CMD(MCAST_FILTER_CMD),
|
||||
CMD(REPLY_BEACON_FILTERING_CMD),
|
||||
CMD(REPLY_THERMAL_MNG_BACKOFF),
|
||||
CMD(MAC_PM_POWER_TABLE),
|
||||
};
|
||||
#undef CMD
|
||||
|
||||
|
@ -340,6 +342,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|||
mvm->fw = fw;
|
||||
mvm->hw = hw;
|
||||
|
||||
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
|
||||
|
||||
mutex_init(&mvm->mutex);
|
||||
spin_lock_init(&mvm->async_handlers_lock);
|
||||
INIT_LIST_HEAD(&mvm->time_event_list);
|
||||
|
@ -431,6 +435,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|||
if (err)
|
||||
goto out_unregister;
|
||||
|
||||
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
|
||||
mvm->pm_ops = &pm_mac_ops;
|
||||
else
|
||||
mvm->pm_ops = &pm_legacy_ops;
|
||||
|
||||
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
|
||||
|
||||
return op_mode;
|
||||
|
||||
out_unregister:
|
||||
|
@ -638,6 +649,22 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
|
|||
ieee80211_free_txskb(mvm->hw, skb);
|
||||
}
|
||||
|
||||
struct iwl_mvm_reprobe {
|
||||
struct device *dev;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
static void iwl_mvm_reprobe_wk(struct work_struct *wk)
|
||||
{
|
||||
struct iwl_mvm_reprobe *reprobe;
|
||||
|
||||
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
|
||||
if (device_reprobe(reprobe->dev))
|
||||
dev_err(reprobe->dev, "reprobe failed!\n");
|
||||
kfree(reprobe);
|
||||
module_put(THIS_MODULE);
|
||||
}
|
||||
|
||||
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
|
||||
{
|
||||
iwl_abort_notification_waits(&mvm->notif_wait);
|
||||
|
@ -649,9 +676,30 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
|
|||
* can't recover this since we're already half suspended.
|
||||
*/
|
||||
if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
|
||||
IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
|
||||
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
|
||||
iwlwifi_mod_params.restart_fw) {
|
||||
struct iwl_mvm_reprobe *reprobe;
|
||||
|
||||
IWL_ERR(mvm,
|
||||
"Firmware error during reconfiguration - reprobe!\n");
|
||||
|
||||
/*
|
||||
* get a module reference to avoid doing this while unloading
|
||||
* anyway and to avoid scheduling a work with code that's
|
||||
* being removed.
|
||||
*/
|
||||
if (!try_module_get(THIS_MODULE)) {
|
||||
IWL_ERR(mvm, "Module is being unloaded - abort\n");
|
||||
return;
|
||||
}
|
||||
|
||||
reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
|
||||
if (!reprobe) {
|
||||
module_put(THIS_MODULE);
|
||||
return;
|
||||
}
|
||||
reprobe->dev = mvm->trans->dev;
|
||||
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
|
||||
schedule_work(&reprobe->work);
|
||||
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
|
||||
/*
|
||||
* This is a bit racy, but worst case we tell mac80211 about
|
||||
* a stopped/aborted (sched) scan when that was already done
|
||||
|
@ -669,6 +717,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
|
|||
break;
|
||||
}
|
||||
|
||||
if (mvm->restart_fw > 0)
|
||||
mvm->restart_fw--;
|
||||
ieee80211_restart_hw(mvm->hw);
|
||||
}
|
||||
}
|
||||
|
@ -678,6 +728,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
|
|||
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
||||
|
||||
iwl_mvm_dump_nic_error_log(mvm);
|
||||
if (!mvm->restart_fw)
|
||||
iwl_mvm_dump_sram(mvm);
|
||||
|
||||
iwl_mvm_nic_restart(mvm);
|
||||
}
|
||||
|
|
|
@ -75,8 +75,8 @@
|
|||
|
||||
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
|
||||
|
||||
static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
|
||||
struct iwl_beacon_filter_cmd *cmd)
|
||||
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
|
||||
struct iwl_beacon_filter_cmd *cmd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@@ -85,52 +85,60 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,

        if (!ret) {
                IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
                                cmd->ba_enable_beacon_abort);
                                le32_to_cpu(cmd->ba_enable_beacon_abort));
                IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
                                cmd->ba_escape_timer);
                                le32_to_cpu(cmd->ba_escape_timer));
                IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
                                cmd->bf_debug_flag);
                                le32_to_cpu(cmd->bf_debug_flag));
                IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
                                cmd->bf_enable_beacon_filter);
                                le32_to_cpu(cmd->bf_enable_beacon_filter));
                IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
                                cmd->bf_energy_delta);
                                le32_to_cpu(cmd->bf_energy_delta));
                IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
                                cmd->bf_escape_timer);
                                le32_to_cpu(cmd->bf_escape_timer));
                IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
                                cmd->bf_roaming_energy_delta);
                                le32_to_cpu(cmd->bf_roaming_energy_delta));
                IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
                                cmd->bf_roaming_state);
                IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
                                cmd->bf_temperature_delta);
                                le32_to_cpu(cmd->bf_roaming_state));
                IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
                                le32_to_cpu(cmd->bf_temp_threshold));
                IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
                                le32_to_cpu(cmd->bf_temp_fast_filter));
                IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
                                le32_to_cpu(cmd->bf_temp_slow_filter));
        }
        return ret;
}

static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif, bool enable)
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif, bool enable)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_beacon_filter_cmd cmd = {
                IWL_BF_CMD_CONFIG_DEFAULTS,
                .bf_enable_beacon_filter = 1,
                .ba_enable_beacon_abort = enable,
                .bf_enable_beacon_filter = cpu_to_le32(1),
                .ba_enable_beacon_abort = cpu_to_le32(enable),
        };

        if (!mvmvif->bf_enabled)
                return 0;

        if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
                cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);

        iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
        return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
}

static void iwl_mvm_power_log(struct iwl_mvm *mvm,
                              struct iwl_powertable_cmd *cmd)
                              struct iwl_mac_power_cmd *cmd)
{
        IWL_DEBUG_POWER(mvm,
                        "Sending power table command for power level %d, flags = 0x%X\n",
                        iwlmvm_mod_params.power_scheme,
                        "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
                        cmd->id_and_color, iwlmvm_mod_params.power_scheme,
                        le16_to_cpu(cmd->flags));
        IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
        IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
                        le16_to_cpu(cmd->keep_alive_seconds));

        if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
                IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
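Most of this hunk switches the beacon-filter and power command fields to explicit little-endian on-the-wire types, so every store goes through cpu_to_le32()/cpu_to_le16() and every debug print converts back with le32_to_cpu()/le16_to_cpu(). A minimal sketch of that pairing, with an invented demo_fw_cmd struct standing in for the real firmware command:

#include <linux/kernel.h>
#include <linux/types.h>

/* hypothetical firmware command: fields travel in little-endian byte order */
struct demo_fw_cmd {
        __le32 escape_timer;
        __le16 flags;
};

static void demo_fill_and_log(struct demo_fw_cmd *cmd, u32 timer, u16 flags)
{
        cmd->escape_timer = cpu_to_le32(timer); /* host order -> wire order */
        cmd->flags = cpu_to_le16(flags);

        /* convert back so the log shows host-order values, as the hunk does */
        pr_debug("escape_timer=%u flags=0x%x\n",
                 le32_to_cpu(cmd->escape_timer), le16_to_cpu(cmd->flags));
}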
@@ -139,15 +147,16 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
                                le32_to_cpu(cmd->tx_data_timeout));
                if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
                        IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
                                        le32_to_cpu(cmd->skip_dtim_periods));
                                        cmd->skip_dtim_periods);
                if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
                        IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
                                        le32_to_cpu(cmd->lprx_rssi_threshold));
                                        cmd->lprx_rssi_threshold);
        }
}

void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             struct iwl_powertable_cmd *cmd)
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
{
        struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_chanctx_conf *chanctx_conf;
@@ -158,19 +167,26 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);

        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                            mvmvif->color));
        dtimper = hw->conf.ps_dtim_period ?: 1;

        /*
         * Regardless of power management state the driver must set
         * keep alive period. FW will use it for sending keep alive NDPs
         * immediately after association.
         * immediately after association. Check that keep alive period
         * is at least 3 * DTIM
         */
        cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
        dtimper_msec = dtimper * vif->bss_conf.beacon_int;
        keep_alive = max_t(int, 3 * dtimper_msec,
                           MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
        keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
        cmd->keep_alive_seconds = cpu_to_le16(keep_alive);

        if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
                return;

        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
        if (!vif->bss_conf.assoc)
                cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);

#ifdef CONFIG_IWLWIFI_DEBUGFS
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
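The keep-alive computation above takes the larger of three DTIM intervals and the 25-second default (POWER_KEEP_ALIVE_PERIOD_SEC), then rounds up to whole seconds. A small standalone sketch of the same arithmetic; the function name is invented and 1000 stands in for MSEC_PER_SEC:

#include <linux/kernel.h>

static u16 demo_keep_alive_sec(int dtim_period, int beacon_int_msec)
{
        int dtim_msec = (dtim_period ?: 1) * beacon_int_msec;
        int keep_alive_msec = max_t(int, 3 * dtim_msec, 25 * 1000);

        /*
         * e.g. dtim_period=2,  beacon_int=100  ->   600 ms -> clamped to 25 s
         *      dtim_period=10, beacon_int=1000 -> 30000 ms -> 30 s
         */
        return DIV_ROUND_UP(keep_alive_msec, 1000);
}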
@@ -186,12 +202,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
            (vif->bss_conf.beacon_rate->bitrate == 10 ||
             vif->bss_conf.beacon_rate->bitrate == 60)) {
                cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
                cmd->lprx_rssi_threshold =
                        cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
                cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
        }

        dtimper = hw->conf.ps_dtim_period ?: 1;

        /* Check if radar detection is required on current channel */
        rcu_read_lock();
        chanctx_conf = rcu_dereference(vif->chanctx_conf);
@@ -207,27 +220,25 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
            (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
             mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
                cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
                cmd->skip_dtim_periods = cpu_to_le32(3);
                cmd->skip_dtim_periods = 3;
        }

        /* Check that keep alive period is at least 3 * DTIM */
        dtimper_msec = dtimper * vif->bss_conf.beacon_int;
        keep_alive = max_t(int, 3 * dtimper_msec,
                           MSEC_PER_SEC * cmd->keep_alive_seconds);
        keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
        cmd->keep_alive_seconds = keep_alive;

        if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
                cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
                cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
                cmd->rx_data_timeout =
                        cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
                cmd->tx_data_timeout =
                        cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
        } else {
                cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
                cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
                cmd->rx_data_timeout =
                        cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
                cmd->tx_data_timeout =
                        cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
        }

#ifdef CONFIG_IWLWIFI_DEBUGFS
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
                cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
                cmd->keep_alive_seconds =
                        cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
                if (mvmvif->dbgfs_pm.skip_over_dtim)
                        cmd->flags |=
@@ -243,8 +254,7 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                cmd->tx_data_timeout =
                        cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
                cmd->skip_dtim_periods =
                        cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
                cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
                if (mvmvif->dbgfs_pm.lprx_ena)
                        cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
@@ -252,16 +262,16 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
        }
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
                cmd->lprx_rssi_threshold =
                        cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
                cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}

int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
{
        int ret;
        bool ba_enable;
        struct iwl_powertable_cmd cmd = {};
        struct iwl_mac_power_cmd cmd = {};

        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
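The CONFIG_IWLWIFI_DEBUGFS block above follows a mask-and-override pattern: each debugfs knob sets a bit in a mask, and only the masked fields replace what the driver computed. A self-contained sketch with invented demo_* types (the real override structure lives in the driver's debugfs code):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_OVERRIDE_KEEP_ALIVE        BIT(0)
#define DEMO_OVERRIDE_SKIP_DTIM         BIT(1)

struct demo_pm_override {
        u32 mask;
        u16 keep_alive_seconds;
        u8 skip_dtim_periods;
};

/* mirrors a small subset of the firmware power command, for illustration */
struct demo_power_cmd {
        __le16 keep_alive_seconds;
        u8 skip_dtim_periods;
};

static void demo_apply_overrides(const struct demo_pm_override *o,
                                 struct demo_power_cmd *cmd)
{
        if (o->mask & DEMO_OVERRIDE_KEEP_ALIVE)
                cmd->keep_alive_seconds = cpu_to_le16(o->keep_alive_seconds);
        if (o->mask & DEMO_OVERRIDE_SKIP_DTIM)
                cmd->skip_dtim_periods = o->skip_dtim_periods;
}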
@@ -280,7 +290,7 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        iwl_mvm_power_log(mvm, &cmd);

        ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
        ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
                                   sizeof(cmd), &cmd);
        if (ret)
                return ret;
@@ -291,15 +301,19 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}

int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif)
{
        struct iwl_powertable_cmd cmd = {};
        struct iwl_mac_power_cmd cmd = {};
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);

        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;

        cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                           mvmvif->color));

        if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
                cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);

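Both per-MAC paths above stamp the command with cmd.id_and_color built by FW_CMD_ID_AND_COLOR(), which packs the MAC context id and its "color" (roughly a generation value the driver bumps when the context changes) into one 32-bit field so the firmware can match the command to the right context. The exact bit layout lives in the firmware API headers, so the shifts below are an assumption used only to illustrate the packing; the value is still sent through cpu_to_le32(), as in the hunk above.

#include <linux/types.h>

/* assumed layout: id in the low byte, color in the next byte */
#define DEMO_CTXT_ID_POS        0
#define DEMO_CTXT_COLOR_POS     8

static inline u32 demo_id_and_color(u8 id, u8 color)
{
        return (id << DEMO_CTXT_ID_POS) | (color << DEMO_CTXT_COLOR_POS);
}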
@@ -310,11 +324,50 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif
        iwl_mvm_power_log(mvm, &cmd);

        return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
        return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
                                    sizeof(cmd), &cmd);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif, char *buf,
                                        int bufsz)
{
        struct iwl_mac_power_cmd cmd = {};
        int pos = 0;

        iwl_mvm_power_build_cmd(mvm, vif, &cmd);

        pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
                         (cmd.flags &
                          cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
                         0 : 1);
        pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
                         cmd.skip_dtim_periods);
        pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
                         iwlmvm_mod_params.power_scheme);
        pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
                         le16_to_cpu(cmd.flags));
        pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
                         le16_to_cpu(cmd.keep_alive_seconds));

        if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
                pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
                                 (cmd.flags &
                                  cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
                                 1 : 0);
                pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
                                 le32_to_cpu(cmd.rx_data_timeout));
                pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
                                 le32_to_cpu(cmd.tx_data_timeout));
                if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
                        pos += scnprintf(buf+pos, bufsz-pos,
                                         "lprx_rssi_threshold = %d\n",
                                         cmd.lprx_rssi_threshold);
        }
        return pos;
}

void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
                                         struct iwl_beacon_filter_cmd *cmd)
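The new debugfs read handler builds its text with the usual scnprintf() accumulation idiom: scnprintf() returns the number of characters actually written, never more than the space left, so advancing pos by the return value cannot overrun buf. A trimmed-down, self-contained sketch (demo_* names invented):

#include <linux/kernel.h>

static int demo_format_status(char *buf, int bufsz, u16 keep_alive, u16 flags)
{
        int pos = 0;

        pos += scnprintf(buf + pos, bufsz - pos, "keep_alive = %u\n", keep_alive);
        pos += scnprintf(buf + pos, bufsz - pos, "flags = 0x%x\n", flags);

        return pos;     /* bytes placed in buf, excluding the terminating NUL */
}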
@@ -323,22 +376,30 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
        struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;

        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
                cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
                cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
                cmd->bf_roaming_energy_delta =
                        dbgfs_bf->bf_roaming_energy_delta;
                        cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
                cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
                cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
                cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
                cmd->bf_temp_threshold =
                        cpu_to_le32(dbgfs_bf->bf_temp_threshold);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
                cmd->bf_temp_fast_filter =
                        cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
                cmd->bf_temp_slow_filter =
                        cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
                cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
                cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
                cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
                cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
                cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
                cmd->ba_enable_beacon_abort =
                        cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
}
#endif

@@ -348,7 +409,7 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_beacon_filter_cmd cmd = {
                IWL_BF_CMD_CONFIG_DEFAULTS,
                .bf_enable_beacon_filter = 1,
                .bf_enable_beacon_filter = cpu_to_le32(1),
        };
        int ret;

@@ -372,7 +433,8 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;

        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;

        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
@@ -382,3 +444,11 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,

        return ret;
}

const struct iwl_mvm_power_ops pm_mac_ops = {
        .power_update_mode = iwl_mvm_power_mac_update_mode,
        .power_disable = iwl_mvm_power_mac_disable,
#ifdef CONFIG_IWLWIFI_DEBUGFS
        .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
#endif
};
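pm_mac_ops is an ops table: the rest of the driver reaches the power code through a struct of function pointers, so alternative implementations (for example, for firmware that lacks the per-MAC power command) can be plugged in; that selection is not part of this hunk. The sketch below only illustrates the general shape, with invented demo_* names:

struct demo_power_ops {
        int (*update_mode)(void *mvm, void *vif);
        int (*disable)(void *mvm, void *vif);
};

static int demo_mac_update_mode(void *mvm, void *vif) { return 0; }
static int demo_mac_disable(void *mvm, void *vif) { return 0; }

static const struct demo_power_ops demo_pm_mac_ops = {
        .update_mode = demo_mac_update_mode,
        .disable     = demo_mac_disable,
};

/* callers keep a pointer to whichever table matches the running firmware */
static int demo_power_update(const struct demo_power_ops *ops,
                             void *mvm, void *vif)
{
        return ops->update_mode(mvm, vif);
}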