Merge tag 'wireless-drivers-next-for-davem-2015-04-01' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
Major changes:

ath9k:

* add Active Interference Cancellation, a method implemented in the HW to
  counter WLAN RX sensitivity degradation when BT is transmitting at the
  same time. This feature is supported by cards like WB222 based on AR9462.

iwlwifi:

* Location Aware Regulatory was added by Arik
* 8000 device family work
* update to the BT Coex firmware API

brcmfmac:

* add new BCM43455 and BCM43457 SDIO device support
* add new BCM43430 SDIO device support

wil6210:

* take care of AP bridging
* fix NAPI behavior
* found approach to achieve 4*n+2 alignment of Rx frames

rt2x00:

* add new rt2800usb device DWA 130

rtlwifi:

* add USB ID for D-Link DWA-131
* add USB ID for ASUS N10 WiFi dongle

mwifiex:

* throughput enhancements
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -1,6 +1,6 @@
config BCMA_POSSIBLE
	bool
	depends on HAS_IOMEM && HAS_DMA && PCI
	depends on HAS_IOMEM && HAS_DMA
	default y

menu "Broadcom specific AMBA"
@@ -45,9 +45,9 @@ config BCMA_HOST_SOC

	  If unsure, say N

# TODO: make it depend on PCI when ready
config BCMA_DRIVER_PCI
	bool
	bool "BCMA Broadcom PCI core driver"
	depends on BCMA && PCI
	default y
	help
	  BCMA bus may have many versions of PCIe core. This driver
@@ -106,15 +106,35 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
#endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */

/* driver_pci.c */
#ifdef CONFIG_BCMA_DRIVER_PCI
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
void bcma_core_pci_init(struct bcma_drv_pci *pc);
void bcma_core_pci_up(struct bcma_drv_pci *pc);
void bcma_core_pci_down(struct bcma_drv_pci *pc);
#else
static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
{
	WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
}
static inline void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	/* Initialization is required for PCI hosted bus */
	WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
}
#endif

/* driver_pcie2.c */
#ifdef CONFIG_BCMA_DRIVER_PCI
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
#else
static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
{
	/* Initialization is required for PCI hosted bus */
	WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
}
#endif

extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
@@ -17,6 +17,8 @@

#include "bcma_private.h"

#define BCMA_GPIO_MAX_PINS 32

static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
{
	return container_of(chip, struct bcma_drv_cc, gpio);
@@ -204,6 +206,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)

int bcma_gpio_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	struct gpio_chip *chip = &cc->gpio;
	int err;

@@ -222,7 +225,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
		chip->of_node = cc->core->dev.of_node;
#endif
	switch (cc->core->bus->chipinfo.id) {
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM53572:
		chip->ngpio = 32;
@@ -231,13 +234,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
		chip->ngpio = 16;
	}

	/* There is just one SoC in one device and its GPIO addresses should be
	 * deterministic to address them more easily. The other buses could get
	 * a random base number. */
	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
		chip->base = 0;
	else
		chip->base = -1;
	/*
	 * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
	 * pin numbers. We don't have Device Tree there and we can't really use
	 * relative (per chip) numbers.
	 * So let's use predictable base for BCM47XX and "random" for all other.
	 */
#if IS_BUILTIN(CONFIG_BCM47XX)
	chip->base = bus->num * BCMA_GPIO_MAX_PINS;
#else
	chip->base = -1;
#endif

	err = bcma_gpio_irq_domain_init(cc);
	if (err)
@@ -282,39 +282,6 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);

int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev;
	u32 coremask, tmp;
	int err = 0;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host-bus. So the IRQs are
		 * not routed through the PCI core.
		 * So we must not enable routing through the PCI core. */
		goto out;
	}

	pdev = bus->host_pci;

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);

static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
	u32 w;
@@ -351,3 +351,37 @@ void bcma_host_pci_down(struct bcma_bus *bus)
	bcma_core_pci_down(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_down);

/* See also si_pci_setup */
int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev;
	u32 coremask, tmp;
	int err = 0;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host-bus. So the IRQs are
		 * not routed through the PCI core.
		 * So we must not enable routing through the PCI core. */
		goto out;
	}

	pdev = bus->host_pci;

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
@@ -779,8 +779,6 @@ static void ar5523_tx(struct ieee80211_hw *hw,
		ieee80211_stop_queues(hw);
	}

	data->skb = skb;

	spin_lock_irqsave(&ar->tx_data_list_lock, flags);
	list_add_tail(&data->list, &ar->tx_queue_pending);
	spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
@@ -817,10 +815,13 @@ static void ar5523_tx_work_locked(struct ar5523 *ar)
		if (!data)
			break;

		skb = data->skb;
		txi = container_of((void *)data, struct ieee80211_tx_info,
				   driver_data);
		txqid = 0;
		txi = IEEE80211_SKB_CB(skb);

		skb = container_of((void *)txi, struct sk_buff, cb);
		paylen = skb->len;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			ar5523_err(ar, "Failed to allocate TX urb\n");
@@ -74,7 +74,6 @@ struct ar5523_tx_cmd {
struct ar5523_tx_data {
	struct list_head list;
	struct ar5523 *ar;
	struct sk_buff *skb;
	struct urb *urb;
};
@@ -131,6 +131,9 @@ struct ath_ops {
	void (*enable_write_buffer)(void *);
	void (*write_flush) (void *);
	u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
	void (*enable_rmw_buffer)(void *);
	void (*rmw_flush) (void *);

};

struct ath_common;
@@ -1283,6 +1283,7 @@ struct ath5k_hw {
#define ATH_STAT_PROMISC	1
#define ATH_STAT_LEDSOFT	2	/* enable LED gpio status */
#define ATH_STAT_STARTED	3	/* opened & irqs enabled */
#define ATH_STAT_RESET		4	/* hw reset */

	unsigned int		filter_flags;	/* HW flags, AR5K_RX_FILTER_* */
	unsigned int		fif_filter_flags; /* Current FIF_* filter flags */
@@ -1523,6 +1523,9 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
	enum ath5k_int imask;
	unsigned long flags;

	if (test_bit(ATH_STAT_RESET, ah->status))
		return;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
@@ -2858,10 +2861,12 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;
	bool fast = chan && modparam_fastchanswitch ? 1 : 0;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	__set_bit(ATH_STAT_RESET, ah->status);

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);
@@ -2876,11 +2881,29 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);

	/* Stop PCU */
	ath5k_hw_stop_rx_pcu(ah);

	/* Stop DMA
	 *
	 * Note: If DMA didn't stop continue
	 * since only a reset will fix it.
	 */
	ret = ath5k_hw_dma_stop(ah);

	/* RF Bus grant won't work if we have pending
	 * frames
	 */
	if (ret && fast) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "DMA didn't stop, falling back to normal reset\n");
		fast = false;
	}

	if (chan)
		ah->curchan = chan;

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
@@ -2934,6 +2957,8 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
	 */
	/* ath5k_chan_change(ah, c); */

	__clear_bit(ATH_STAT_RESET, ah->status);

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

@@ -1169,30 +1169,6 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
	if (ah->ah_version == AR5K_AR5212)
		ath5k_hw_set_sleep_clock(ah, false);

	/*
	 * Stop PCU
	 */
	ath5k_hw_stop_rx_pcu(ah);

	/*
	 * Stop DMA
	 *
	 * Note: If DMA didn't stop continue
	 * since only a reset will fix it.
	 */
	ret = ath5k_hw_dma_stop(ah);

	/* RF Bus grant won't work if we have pending
	 * frames */
	if (ret && fast) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "DMA didn't stop, falling back to normal reset\n");
		fast = false;
		/* Non fatal, just continue with
		 * normal reset */
		ret = 0;
	}

	mode = channel->hw_value;
	switch (mode) {
	case AR5K_MODE_11A:
@@ -46,7 +46,8 @@ ath9k_hw-y:= \
ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o

ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
					   ar9003_mci.o
					   ar9003_mci.o \
					   ar9003_aic.o

ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o

@@ -107,11 +107,21 @@ static const struct ani_cck_level_entry cck_level_table[] = {
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
				     struct ath9k_mib_stats *stats)
{
	stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
	stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
	stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
	stats->rts_good += REG_READ(ah, AR_RTS_OK);
	stats->beacons += REG_READ(ah, AR_BEACON_CNT);
	u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL,
		       AR_FCS_FAIL, AR_BEACON_CNT};
	u32 data[5];

	REG_READ_MULTI(ah, &addr[0], &data[0], 5);
	/* AR_RTS_OK */
	stats->rts_good += data[0];
	/* AR_RTS_FAIL */
	stats->rts_bad += data[1];
	/* AR_ACK_FAIL */
	stats->ackrcv_bad += data[2];
	/* AR_FCS_FAIL */
	stats->fcs_bad += data[3];
	/* AR_BEACON_CNT */
	stats->beacons += data[4];
}

static void ath9k_ani_restart(struct ath_hw *ah)
@ -681,12 +681,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
|
|||
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
|
||||
|
||||
}
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
REG_WRITE(ah, AR_PHY_TURBO, phymode);
|
||||
|
||||
/* This function do only REG_WRITE, so
|
||||
* we can include it to REGWRITE_BUFFER. */
|
||||
ath9k_hw_set11nmac2040(ah, chan);
|
||||
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
|
||||
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
|
||||
REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
|
||||
|
||||
|
|
|
@ -430,46 +430,43 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
|
|||
u32 regVal;
|
||||
unsigned int i;
|
||||
u32 regList[][2] = {
|
||||
{ 0x786c, 0 },
|
||||
{ 0x7854, 0 },
|
||||
{ 0x7820, 0 },
|
||||
{ 0x7824, 0 },
|
||||
{ 0x7868, 0 },
|
||||
{ 0x783c, 0 },
|
||||
{ 0x7838, 0 } ,
|
||||
{ 0x7828, 0 } ,
|
||||
{ AR9285_AN_TOP3, 0 },
|
||||
{ AR9285_AN_RXTXBB1, 0 },
|
||||
{ AR9285_AN_RF2G1, 0 },
|
||||
{ AR9285_AN_RF2G2, 0 },
|
||||
{ AR9285_AN_TOP2, 0 },
|
||||
{ AR9285_AN_RF2G8, 0 },
|
||||
{ AR9285_AN_RF2G7, 0 },
|
||||
{ AR9285_AN_RF2G3, 0 },
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(regList); i++)
|
||||
regList[i][1] = REG_READ(ah, regList[i][0]);
|
||||
|
||||
regVal = REG_READ(ah, 0x7834);
|
||||
regVal &= (~(0x1));
|
||||
REG_WRITE(ah, 0x7834, regVal);
|
||||
regVal = REG_READ(ah, 0x9808);
|
||||
regVal |= (0x1 << 27);
|
||||
REG_WRITE(ah, 0x9808, regVal);
|
||||
REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList));
|
||||
|
||||
ENABLE_REG_RMW_BUFFER(ah);
|
||||
/* 7834, b1=0 */
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
|
||||
/* 9808, b27=1 */
|
||||
REG_SET_BIT(ah, 0x9808, 1 << 27);
|
||||
/* 786c,b23,1, pwddac=1 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
|
||||
REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
|
||||
/* 7854, b5,1, pdrxtxbb=1 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
|
||||
REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1);
|
||||
/* 7854, b7,1, pdv2i=1 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
|
||||
REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I);
|
||||
/* 7854, b8,1, pddacinterface=1 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
|
||||
REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF);
|
||||
/* 7824,b12,0, offcal=0 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
|
||||
/* 7838, b1,0, pwddb=0 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB);
|
||||
/* 7820,b11,0, enpacal=0 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL);
|
||||
/* 7820,b25,1, pdpadrv1=0 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1);
|
||||
/* 7820,b24,0, pdpadrv2=0 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2);
|
||||
/* 7820,b23,0, pdpaout=0 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
|
||||
REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT);
|
||||
/* 783c,b14-16,7, padrvgn2tab_0=7 */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
|
||||
/*
|
||||
|
@ -477,8 +474,9 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
|
|||
* does not matter since we turn it off
|
||||
*/
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
|
||||
|
||||
/* 7828, b0-11, ccom=fff */
|
||||
REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
|
||||
REG_RMW_BUFFER_FLUSH(ah);
|
||||
|
||||
/* Set:
|
||||
* localmode=1,bmode=1,bmoderxtx=1,synthon=1,
|
||||
|
@ -490,15 +488,16 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
|
|||
|
||||
/* find off_6_1; */
|
||||
for (i = 6; i > 0; i--) {
|
||||
regVal = REG_READ(ah, 0x7834);
|
||||
regVal = REG_READ(ah, AR9285_AN_RF2G6);
|
||||
regVal |= (1 << (20 + i));
|
||||
REG_WRITE(ah, 0x7834, regVal);
|
||||
REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
|
||||
udelay(1);
|
||||
/* regVal = REG_READ(ah, 0x7834); */
|
||||
regVal &= (~(0x1 << (20 + i)));
|
||||
regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
|
||||
regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9),
|
||||
AR9285_AN_RXTXBB1_SPARE9)
|
||||
<< (20 + i));
|
||||
REG_WRITE(ah, 0x7834, regVal);
|
||||
REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
|
||||
}
|
||||
|
||||
regVal = (regVal >> 20) & 0x7f;
|
||||
|
@ -515,15 +514,15 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
|
|||
ah->pacal_info.prev_offset = regVal;
|
||||
}
|
||||
|
||||
|
||||
ENABLE_REG_RMW_BUFFER(ah);
|
||||
/* 7834, b1=1 */
|
||||
REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
|
||||
/* 9808, b27=0 */
|
||||
REG_CLR_BIT(ah, 0x9808, 1 << 27);
|
||||
REG_RMW_BUFFER_FLUSH(ah);
|
||||
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
|
||||
regVal = REG_READ(ah, 0x7834);
|
||||
regVal |= 0x1;
|
||||
REG_WRITE(ah, 0x7834, regVal);
|
||||
regVal = REG_READ(ah, 0x9808);
|
||||
regVal &= (~(0x1 << 27));
|
||||
REG_WRITE(ah, 0x9808, regVal);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(regList); i++)
|
||||
REG_WRITE(ah, regList[i][0], regList[i][1]);
|
||||
|
||||
|
|
|
@ -0,0 +1,599 @@
|
|||
/*
|
||||
* Copyright (c) 2015 Qualcomm Atheros Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "hw.h"
|
||||
#include "hw-ops.h"
|
||||
#include "ar9003_mci.h"
|
||||
#include "ar9003_aic.h"
|
||||
#include "ar9003_phy.h"
|
||||
#include "reg_aic.h"
|
||||
|
||||
static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = {
|
||||
0, 3, 9, 15, 21, 27
|
||||
};
|
||||
|
||||
static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = {
|
||||
8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659,
|
||||
3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457,
|
||||
1298, 1157, 1031, 919, 819, 730, 651, 580,
|
||||
517, 461, 411, 366, 326, 291, 259, 231,
|
||||
206, 183, 163, 146, 130, 116, 103, 92,
|
||||
82, 73, 65, 58, 52, 46, 41, 37,
|
||||
33, 29, 26, 23, 21, 18, 16, 15,
|
||||
13, 12, 10, 9, 8, 7, 7, 6,
|
||||
5, 5, 4, 4, 3
|
||||
};
|
||||
|
||||
static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
|
||||
|
||||
/*
|
||||
* Disable AIC for now, until we have all the
|
||||
* HW code and the driver-layer support ready.
|
||||
*/
|
||||
return false;
|
||||
|
||||
if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
|
||||
bool dir, u8 index)
|
||||
{
|
||||
int16_t i;
|
||||
|
||||
if (dir) {
|
||||
for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
if (cal_sram[i].valid)
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
for (i = index - 1; i >= 0; i--) {
|
||||
if (cal_sram[i].valid)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0))
|
||||
i = -1;
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
/*
|
||||
* type 0: aic_lin_table, 1: com_att_db_table
|
||||
*/
|
||||
static int16_t ar9003_aic_find_index(u8 type, int16_t value)
|
||||
{
|
||||
int16_t i = -1;
|
||||
|
||||
if (type == 0) {
|
||||
for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) {
|
||||
if (aic_lin_table[i] >= value)
|
||||
break;
|
||||
}
|
||||
} else if (type == 1) {
|
||||
for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) {
|
||||
if (com_att_db_table[i] > value) {
|
||||
i--;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE)
|
||||
i = -1;
|
||||
}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static void ar9003_aic_gain_table(struct ath_hw *ah)
|
||||
{
|
||||
u32 aic_atten_word[19], i;
|
||||
|
||||
/* Config LNA gain difference */
|
||||
REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00);
|
||||
REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438);
|
||||
|
||||
/* Program gain table */
|
||||
aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 |
|
||||
(0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31, 00 dB: 4'd0, 5'd31 */
|
||||
aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 |
|
||||
(0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */
|
||||
aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 |
|
||||
(0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */
|
||||
aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) << 5 |
|
||||
(0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */
|
||||
aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 |
|
||||
(0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */
|
||||
aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 |
|
||||
(0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */
|
||||
aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x0 & 0xf) << 5 |
|
||||
(0xf & 0x1f); /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */
|
||||
aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x2 & 0xf) << 5 |
|
||||
(0xf & 0x1f); /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */
|
||||
aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x4 & 0xf) << 5 |
|
||||
(0xf & 0x1f); /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */
|
||||
aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
|
||||
(0x7 & 0x1f); /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */
|
||||
aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
|
||||
(0x7 & 0x1f); /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */
|
||||
aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
|
||||
(0x7 & 0x1f); /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */
|
||||
aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
|
||||
(0x7 & 0x1f); /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */
|
||||
aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
|
||||
(0x3 & 0x1f); /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */
|
||||
aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
|
||||
(0x3 & 0x1f); /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */
|
||||
aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
|
||||
(0x1 & 0x1f); /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */
|
||||
aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
|
||||
(0x1 & 0x1f); /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */
|
||||
aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
|
||||
(0x1 & 0x1f); /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */
|
||||
aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
|
||||
(0x1 & 0x1f); /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */
|
||||
|
||||
/* Write to Gain table with auto increment enabled. */
|
||||
REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
|
||||
(ATH_AIC_SRAM_AUTO_INCREMENT |
|
||||
ATH_AIC_SRAM_GAIN_TABLE_OFFSET));
|
||||
|
||||
for (i = 0; i < 19; i++) {
|
||||
REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000),
|
||||
aic_atten_word[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
|
||||
{
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
int i;
|
||||
|
||||
/* Write to Gain table with auto increment enabled. */
|
||||
REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
|
||||
(ATH_AIC_SRAM_AUTO_INCREMENT |
|
||||
ATH_AIC_SRAM_CAL_OFFSET));
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0);
|
||||
aic->aic_sram[i] = 0;
|
||||
}
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0,
|
||||
(SM(0, AR_PHY_AIC_MON_ENABLE) |
|
||||
SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) |
|
||||
SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) |
|
||||
SM(37, AR_PHY_AIC_F_WLAN) |
|
||||
SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
|
||||
SM(0, AR_PHY_AIC_CAL_ENABLE) |
|
||||
SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
|
||||
SM(0, AR_PHY_AIC_ENABLE)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1,
|
||||
(SM(0, AR_PHY_AIC_MON_ENABLE) |
|
||||
SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
|
||||
SM(0, AR_PHY_AIC_CAL_ENABLE) |
|
||||
SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
|
||||
SM(0, AR_PHY_AIC_ENABLE)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0,
|
||||
(SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) |
|
||||
SM(0, AR_PHY_AIC_BT_IDLE_CFG) |
|
||||
SM(1, AR_PHY_AIC_STDBY_COND) |
|
||||
SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) |
|
||||
SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) |
|
||||
SM(15, AR_PHY_AIC_RSSI_MAX) |
|
||||
SM(0, AR_PHY_AIC_RSSI_MIN)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1,
|
||||
(SM(15, AR_PHY_AIC_RSSI_MAX) |
|
||||
SM(0, AR_PHY_AIC_RSSI_MIN)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0,
|
||||
(SM(44, AR_PHY_AIC_RADIO_DELAY) |
|
||||
SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) |
|
||||
SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) |
|
||||
SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) |
|
||||
SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) |
|
||||
SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) |
|
||||
SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) |
|
||||
SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0,
|
||||
(SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) |
|
||||
SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) |
|
||||
SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) |
|
||||
SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) |
|
||||
SM(10, AR_PHY_AIC_MON_PERF_THR) |
|
||||
SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) |
|
||||
SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) |
|
||||
SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0,
|
||||
(SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
|
||||
SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
|
||||
SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
|
||||
SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
|
||||
SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1,
|
||||
(SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
|
||||
SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
|
||||
SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
|
||||
SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
|
||||
SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
|
||||
|
||||
ar9003_aic_gain_table(ah);
|
||||
|
||||
/* Need to enable AIC reference signal in BT modem. */
|
||||
REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
|
||||
(REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) |
|
||||
ATH_AIC_BT_AIC_ENABLE));
|
||||
|
||||
aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32);
|
||||
|
||||
/* Start calibration */
|
||||
REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
|
||||
REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET);
|
||||
REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
|
||||
|
||||
aic->aic_caled_chan = 0;
|
||||
aic->aic_cal_state = AIC_CAL_STATE_STARTED;
|
||||
|
||||
return aic->aic_cal_state;
|
||||
}
|
||||
|
||||
static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
|
||||
struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
|
||||
u32 dir_path_gain_idx, quad_path_gain_idx, value;
|
||||
u32 fixed_com_att_db;
|
||||
int8_t dir_path_sign, quad_path_sign;
|
||||
int16_t i;
|
||||
bool ret = true;
|
||||
|
||||
memset(&cal_sram, 0, sizeof(cal_sram));
|
||||
memset(&aic_sram, 0, sizeof(aic_sram));
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
value = aic->aic_sram[i];
|
||||
|
||||
cal_sram[i].valid =
|
||||
MS(value, AR_PHY_AIC_SRAM_VALID);
|
||||
cal_sram[i].rot_quad_att_db =
|
||||
MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
|
||||
cal_sram[i].vga_quad_sign =
|
||||
MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
|
||||
cal_sram[i].rot_dir_att_db =
|
||||
MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
|
||||
cal_sram[i].vga_dir_sign =
|
||||
MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
|
||||
cal_sram[i].com_att_6db =
|
||||
MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
|
||||
|
||||
if (cal_sram[i].valid) {
|
||||
dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
|
||||
com_att_db_table[cal_sram[i].com_att_6db];
|
||||
quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
|
||||
com_att_db_table[cal_sram[i].com_att_6db];
|
||||
|
||||
dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
|
||||
quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
|
||||
|
||||
aic_sram[i].dir_path_gain_lin = dir_path_sign *
|
||||
aic_lin_table[dir_path_gain_idx];
|
||||
aic_sram[i].quad_path_gain_lin = quad_path_sign *
|
||||
aic_lin_table[quad_path_gain_idx];
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
int16_t start_idx, end_idx;
|
||||
|
||||
if (cal_sram[i].valid)
|
||||
continue;
|
||||
|
||||
start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
|
||||
end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
|
||||
|
||||
if (start_idx < 0) {
|
||||
/* extrapolation */
|
||||
start_idx = end_idx;
|
||||
end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
|
||||
|
||||
if (end_idx < 0) {
|
||||
ret = false;
|
||||
break;
|
||||
}
|
||||
|
||||
aic_sram[i].dir_path_gain_lin =
|
||||
((aic_sram[start_idx].dir_path_gain_lin -
|
||||
aic_sram[end_idx].dir_path_gain_lin) *
|
||||
(start_idx - i) + ((end_idx - i) >> 1)) /
|
||||
(end_idx - i) +
|
||||
aic_sram[start_idx].dir_path_gain_lin;
|
||||
aic_sram[i].quad_path_gain_lin =
|
||||
((aic_sram[start_idx].quad_path_gain_lin -
|
||||
aic_sram[end_idx].quad_path_gain_lin) *
|
||||
(start_idx - i) + ((end_idx - i) >> 1)) /
|
||||
(end_idx - i) +
|
||||
aic_sram[start_idx].quad_path_gain_lin;
|
||||
}
|
||||
|
||||
if (end_idx < 0) {
|
||||
/* extrapolation */
|
||||
end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
|
||||
|
||||
if (end_idx < 0) {
|
||||
ret = false;
|
||||
break;
|
||||
}
|
||||
|
||||
aic_sram[i].dir_path_gain_lin =
|
||||
((aic_sram[start_idx].dir_path_gain_lin -
|
||||
aic_sram[end_idx].dir_path_gain_lin) *
|
||||
(i - start_idx) + ((start_idx - end_idx) >> 1)) /
|
||||
(start_idx - end_idx) +
|
||||
aic_sram[start_idx].dir_path_gain_lin;
|
||||
aic_sram[i].quad_path_gain_lin =
|
||||
((aic_sram[start_idx].quad_path_gain_lin -
|
||||
aic_sram[end_idx].quad_path_gain_lin) *
|
||||
(i - start_idx) + ((start_idx - end_idx) >> 1)) /
|
||||
(start_idx - end_idx) +
|
||||
aic_sram[start_idx].quad_path_gain_lin;
|
||||
|
||||
} else if (start_idx >= 0){
|
||||
/* interpolation */
|
||||
aic_sram[i].dir_path_gain_lin =
|
||||
(((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) +
|
||||
((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) +
|
||||
((end_idx - start_idx) >> 1)) /
|
||||
(end_idx - start_idx);
|
||||
aic_sram[i].quad_path_gain_lin =
|
||||
(((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) +
|
||||
((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) +
|
||||
((end_idx - start_idx) >> 1))/
|
||||
(end_idx - start_idx);
|
||||
}
|
||||
}
|
||||
|
||||
/* From dir/quad_path_gain_lin to sram. */
|
||||
i = ar9003_aic_find_valid(cal_sram, 1, 0);
|
||||
if (i < 0) {
|
||||
i = 0;
|
||||
ret = false;
|
||||
}
|
||||
fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
int16_t rot_dir_path_att_db, rot_quad_path_att_db;
|
||||
|
||||
aic_sram[i].sram.vga_dir_sign =
|
||||
(aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
|
||||
aic_sram[i].sram.vga_quad_sign=
|
||||
(aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
|
||||
|
||||
rot_dir_path_att_db =
|
||||
ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) -
|
||||
fixed_com_att_db;
|
||||
rot_quad_path_att_db =
|
||||
ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
|
||||
fixed_com_att_db;
|
||||
|
||||
aic_sram[i].sram.com_att_6db =
|
||||
ar9003_aic_find_index(1, fixed_com_att_db);
|
||||
|
||||
aic_sram[i].sram.valid = 1;
|
||||
|
||||
aic_sram[i].sram.rot_dir_att_db =
|
||||
min(max(rot_dir_path_att_db,
|
||||
(int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
|
||||
ATH_AIC_MAX_ROT_DIR_ATT_DB);
|
||||
aic_sram[i].sram.rot_quad_att_db =
|
||||
min(max(rot_quad_path_att_db,
|
||||
(int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
|
||||
ATH_AIC_MAX_ROT_QUAD_ATT_DB);
|
||||
}
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
|
||||
AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
|
||||
SM(aic_sram[i].sram.vga_quad_sign,
|
||||
AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
|
||||
SM(aic_sram[i].sram.com_att_6db,
|
||||
AR_PHY_AIC_SRAM_COM_ATT_6DB) |
|
||||
SM(aic_sram[i].sram.valid,
|
||||
AR_PHY_AIC_SRAM_VALID) |
|
||||
SM(aic_sram[i].sram.rot_dir_att_db,
|
||||
AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
|
||||
SM(aic_sram[i].sram.rot_quad_att_db,
|
||||
AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ar9003_aic_cal_done(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
|
||||
/* Disable AIC reference signal in BT modem. */
|
||||
REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
|
||||
(REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) &
|
||||
~ATH_AIC_BT_AIC_ENABLE));
|
||||
|
||||
if (ar9003_aic_cal_post_process(ah))
|
||||
aic->aic_cal_state = AIC_CAL_STATE_DONE;
|
||||
else
|
||||
aic->aic_cal_state = AIC_CAL_STATE_ERROR;
|
||||
}
|
||||
|
||||
static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
int i, num_chan;
|
||||
|
||||
num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
|
||||
|
||||
if (!num_chan) {
|
||||
aic->aic_cal_state = AIC_CAL_STATE_ERROR;
|
||||
return aic->aic_cal_state;
|
||||
}
|
||||
|
||||
if (cal_once) {
|
||||
for (i = 0; i < 10000; i++) {
|
||||
if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
|
||||
AR_PHY_AIC_CAL_ENABLE) == 0)
|
||||
break;
|
||||
|
||||
udelay(100);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Use AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE.
|
||||
* Sometimes CAL_DONE bit is not asserted.
|
||||
*/
|
||||
if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
|
||||
AR_PHY_AIC_CAL_ENABLE) != 0) {
|
||||
ath_dbg(common, MCI, "AIC cal is not done after 40ms");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1,
|
||||
(ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT));
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
u32 value;
|
||||
|
||||
value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1);
|
||||
|
||||
if (value & 0x01) {
|
||||
if (aic->aic_sram[i] == 0)
|
||||
aic->aic_caled_chan++;
|
||||
|
||||
aic->aic_sram[i] = value;
|
||||
|
||||
if (!cal_once)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ((aic->aic_caled_chan >= num_chan) || cal_once) {
|
||||
ar9003_aic_cal_done(ah);
|
||||
} else {
|
||||
/* Start calibration */
|
||||
REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
|
||||
REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1,
|
||||
AR_PHY_AIC_CAL_CH_VALID_RESET);
|
||||
REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
|
||||
}
|
||||
exit:
|
||||
return aic->aic_cal_state;
|
||||
|
||||
}
|
||||
|
||||
u8 ar9003_aic_calibration(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
u8 cal_ret = AIC_CAL_STATE_ERROR;
|
||||
|
||||
switch (aic->aic_cal_state) {
|
||||
case AIC_CAL_STATE_IDLE:
|
||||
cal_ret = ar9003_aic_cal_start(ah, 1);
|
||||
break;
|
||||
case AIC_CAL_STATE_STARTED:
|
||||
cal_ret = ar9003_aic_cal_continue(ah, false);
|
||||
break;
|
||||
case AIC_CAL_STATE_DONE:
|
||||
cal_ret = AIC_CAL_STATE_DONE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return cal_ret;
|
||||
}
|
||||
|
||||
u8 ar9003_aic_start_normal(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
int16_t i;
|
||||
|
||||
if (aic->aic_cal_state != AIC_CAL_STATE_DONE)
|
||||
return 1;
|
||||
|
||||
ar9003_aic_gain_table(ah);
|
||||
|
||||
REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT);
|
||||
|
||||
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
|
||||
REG_WRITE(ah, AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]);
|
||||
}
|
||||
|
||||
/* FIXME: Replace these with proper register names */
|
||||
REG_WRITE(ah, 0xa6b0, 0x80);
|
||||
REG_WRITE(ah, 0xa6b4, 0x5b2df0);
|
||||
REG_WRITE(ah, 0xa6b8, 0x10762cc8);
|
||||
REG_WRITE(ah, 0xa6bc, 0x1219a4b);
|
||||
REG_WRITE(ah, 0xa6c0, 0x1e01);
|
||||
REG_WRITE(ah, 0xb6b4, 0xf0);
|
||||
REG_WRITE(ah, 0xb6c0, 0x1e01);
|
||||
REG_WRITE(ah, 0xb6b0, 0x81);
|
||||
REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000);
|
||||
|
||||
aic->aic_enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
u8 ar9003_aic_cal_reset(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
|
||||
|
||||
aic->aic_cal_state = AIC_CAL_STATE_IDLE;
|
||||
return aic->aic_cal_state;
|
||||
}
|
||||
|
||||
u8 ar9003_aic_calibration_single(struct ath_hw *ah)
|
||||
{
|
||||
struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
|
||||
u8 cal_ret;
|
||||
int num_chan;
|
||||
|
||||
num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
|
||||
|
||||
(void) ar9003_aic_cal_start(ah, num_chan);
|
||||
cal_ret = ar9003_aic_cal_continue(ah, true);
|
||||
|
||||
return cal_ret;
|
||||
}
|
||||
|
||||
void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
|
||||
{
|
||||
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
|
||||
|
||||
priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled;
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright (c) 2015 Qualcomm Atheros Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef AR9003_AIC_H
|
||||
#define AR9003_AIC_H
|
||||
|
||||
#define ATH_AIC_MAX_COM_ATT_DB_TABLE 6
|
||||
#define ATH_AIC_MAX_AIC_LIN_TABLE 69
|
||||
#define ATH_AIC_MIN_ROT_DIR_ATT_DB 0
|
||||
#define ATH_AIC_MIN_ROT_QUAD_ATT_DB 0
|
||||
#define ATH_AIC_MAX_ROT_DIR_ATT_DB 37
|
||||
#define ATH_AIC_MAX_ROT_QUAD_ATT_DB 37
|
||||
#define ATH_AIC_SRAM_AUTO_INCREMENT 0x80000000
|
||||
#define ATH_AIC_SRAM_GAIN_TABLE_OFFSET 0x280
|
||||
#define ATH_AIC_SRAM_CAL_OFFSET 0x140
|
||||
#define ATH_AIC_SRAM_OFFSET 0x00
|
||||
#define ATH_AIC_MEAS_MAG_THRESH 20
|
||||
#define ATH_AIC_BT_JUPITER_CTRL 0x66820
|
||||
#define ATH_AIC_BT_AIC_ENABLE 0x02
|
||||
|
||||
enum aic_cal_state {
|
||||
AIC_CAL_STATE_IDLE = 0,
|
||||
AIC_CAL_STATE_STARTED,
|
||||
AIC_CAL_STATE_DONE,
|
||||
AIC_CAL_STATE_ERROR
|
||||
};
|
||||
|
||||
struct ath_aic_sram_info {
|
||||
bool valid:1;
|
||||
bool vga_quad_sign:1;
|
||||
bool vga_dir_sign:1;
|
||||
u8 rot_quad_att_db;
|
||||
u8 rot_dir_att_db;
|
||||
u8 com_att_6db;
|
||||
};
|
||||
|
||||
struct ath_aic_out_info {
|
||||
int16_t dir_path_gain_lin;
|
||||
int16_t quad_path_gain_lin;
|
||||
struct ath_aic_sram_info sram;
|
||||
};
|
||||
|
||||
u8 ar9003_aic_calibration(struct ath_hw *ah);
|
||||
u8 ar9003_aic_start_normal(struct ath_hw *ah);
|
||||
u8 ar9003_aic_cal_reset(struct ath_hw *ah);
|
||||
u8 ar9003_aic_calibration_single(struct ath_hw *ah);
|
||||
|
||||
#endif /* AR9003_AIC_H */
|
|
@ -195,16 +195,16 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
|
|||
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
|
||||
ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
|
||||
|
||||
if (ah->config.no_pll_pwrsave) {
|
||||
if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9485_1_1_pcie_phy_clkreq_disable_L1);
|
||||
ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9485_1_1_pcie_phy_clkreq_disable_L1);
|
||||
ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
|
||||
} else {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
|
||||
ar9485_1_1_pcie_phy_clkreq_disable_L1);
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
|
||||
ar9485_1_1_pcie_phy_clkreq_disable_L1);
|
||||
}
|
||||
} else if (AR_SREV_9462_21(ah)) {
|
||||
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
|
||||
|
@ -231,10 +231,20 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
|
|||
ar9462_2p1_modes_fast_clock);
|
||||
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
|
||||
ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9462_2p1_pciephy_clkreq_disable_L1);
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9462_2p1_pciephy_clkreq_disable_L1);
|
||||
|
||||
/* Awake -> Sleep Setting */
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9462_2p1_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
/* Sleep -> Awake Setting */
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9462_2p1_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
} else if (AR_SREV_9462_20(ah)) {
|
||||
|
||||
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
|
||||
|
@ -262,11 +272,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
|
|||
ar9462_2p0_common_rx_gain);
|
||||
|
||||
/* Awake -> Sleep Setting */
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9462_2p0_pciephy_clkreq_disable_L1);
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9462_2p0_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
/* Sleep -> Awake Setting */
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9462_2p0_pciephy_clkreq_disable_L1);
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9462_2p0_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
/* Fast clock modal settings */
|
||||
INIT_INI_ARRAY(&ah->iniModesFastClock,
|
||||
|
@ -456,10 +473,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
|
|||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
|
||||
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9565_1p1_pciephy_clkreq_disable_L1);
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9565_1p1_pciephy_clkreq_disable_L1);
|
||||
/* Awake -> Sleep Setting */
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9565_1p1_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
/* Sleep -> Awake Setting */
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9565_1p1_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
INIT_INI_ARRAY(&ah->iniModesFastClock,
|
||||
ar9565_1p1_modes_fast_clock);
|
||||
|
@ -491,10 +517,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
|
|||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
|
||||
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9565_1p0_pciephy_clkreq_disable_L1);
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9565_1p0_pciephy_clkreq_disable_L1);
|
||||
/* Awake -> Sleep Setting */
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdes,
|
||||
ar9565_1p0_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
/* Sleep -> Awake Setting */
|
||||
if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
|
||||
(ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9565_1p0_pciephy_clkreq_disable_L1);
|
||||
}
|
||||
|
||||
INIT_INI_ARRAY(&ah->iniModesFastClock,
|
||||
ar9565_1p0_modes_fast_clock);
|
||||
|
@ -1130,6 +1165,12 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
|
|||
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
|
||||
|
||||
ar9003_hw_init_mode_regs(ah);
|
||||
|
||||
if (AR_SREV_9003_PCOEM(ah)) {
|
||||
WARN_ON(!ah->iniPcieSerdes.ia_array);
|
||||
WARN_ON(!ah->iniPcieSerdesLowPower.ia_array);
|
||||
}
|
||||
|
||||
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
|
||||
priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
|
||||
priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
|
||||
|
@ -1139,4 +1180,5 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
|
|||
ar9003_hw_attach_phy_ops(ah);
|
||||
ar9003_hw_attach_calib_ops(ah);
|
||||
ar9003_hw_attach_mac_ops(ah);
|
||||
ar9003_hw_attach_aic_ops(ah);
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "hw-ops.h"
|
||||
#include "ar9003_phy.h"
|
||||
#include "ar9003_mci.h"
|
||||
#include "ar9003_aic.h"
|
||||
|
||||
static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
|
||||
{
|
||||
|
@ -1016,6 +1017,9 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
|
|||
if (en_int)
|
||||
ar9003_mci_enable_interrupt(ah);
|
||||
|
||||
if (ath9k_hw_is_aic_enabled(ah))
|
||||
ar9003_aic_start_normal(ah);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1362,6 +1366,22 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
|
|||
value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
|
||||
mci->need_flush_btinfo = false;
|
||||
break;
|
||||
case MCI_STATE_AIC_CAL:
|
||||
if (ath9k_hw_is_aic_enabled(ah))
|
||||
value = ar9003_aic_calibration(ah);
|
||||
break;
|
||||
case MCI_STATE_AIC_START:
|
||||
if (ath9k_hw_is_aic_enabled(ah))
|
||||
ar9003_aic_start_normal(ah);
|
||||
break;
|
||||
case MCI_STATE_AIC_CAL_RESET:
|
||||
if (ath9k_hw_is_aic_enabled(ah))
|
||||
value = ar9003_aic_cal_reset(ah);
|
||||
break;
|
||||
case MCI_STATE_AIC_CAL_SINGLE:
|
||||
if (ath9k_hw_is_aic_enabled(ah))
|
||||
value = ar9003_aic_calibration_single(ah);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -640,16 +640,6 @@
|
|||
#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
|
||||
#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
|
||||
|
||||
/* AIC Registers */
|
||||
#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
|
||||
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
|
||||
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
|
||||
#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
|
||||
#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4))
|
||||
#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8))
|
||||
#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
|
||||
#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
|
||||
|
||||
#define AR_PHY_65NM_CH0_TXRF3 0x16048
|
||||
#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e
|
||||
#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
|
||||
|
@ -989,21 +979,6 @@
|
|||
#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
|
||||
#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
|
||||
|
||||
/* SM 1 AIC Registers */
|
||||
|
||||
#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
|
||||
#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
|
||||
#define AR_PHY_AIC_CTRL_2_B1 (AR_SM1_BASE + 0x4b8)
|
||||
#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
|
||||
0x4c0 : 0x4c4))
|
||||
#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
|
||||
0x4c4 : 0x4c8))
|
||||
#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
|
||||
#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
|
||||
|
||||
#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
|
||||
#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
|
||||
|
||||
#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \
|
||||
AR_SM1_BASE : AR_SM_BASE))
|
||||
#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \
|
||||
|
|
|
@ -106,7 +106,7 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
|
|||
int chain, i;
|
||||
|
||||
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
|
||||
if (!(ah->rxchainmask & (1 << chain)))
|
||||
if (!(ah->caps.rx_chainmask & (1 << chain)))
|
||||
continue;
|
||||
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
|
||||
ar9003_hw_rtt_load_hist_entry(ah, chain, i,
|
||||
|
@ -171,7 +171,7 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
|
|||
int chain, i;
|
||||
|
||||
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
|
||||
if (!(ah->rxchainmask & (1 << chain)))
|
||||
if (!(ah->caps.rx_chainmask & (1 << chain)))
|
||||
continue;
|
||||
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
|
||||
ah->caldata->rtt_table[chain][i] =
|
||||
|
@ -193,7 +193,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
|
|||
int chain, i;
|
||||
|
||||
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
|
||||
if (!(ah->rxchainmask & (1 << chain)))
|
||||
if (!(ah->caps.rx_chainmask & (1 << chain)))
|
||||
continue;
|
||||
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
|
||||
ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
|
||||
|
|
|
@ -184,12 +184,12 @@ struct ath_frame_info {
|
|||
struct ath_buf *bf;
|
||||
u16 framelen;
|
||||
s8 txq;
|
||||
enum ath9k_key_type keytype;
|
||||
u8 keyix;
|
||||
u8 rtscts_rate;
|
||||
u8 retries : 7;
|
||||
u8 baw_tracked : 1;
|
||||
u8 tx_power;
|
||||
enum ath9k_key_type keytype:2;
|
||||
};
|
||||
|
||||
struct ath_rxbuf {
|
||||
|
|
|
@ -44,6 +44,9 @@
|
|||
|
||||
#define AR9300_NUM_BT_WEIGHTS 4
|
||||
#define AR9300_NUM_WLAN_WEIGHTS 4
|
||||
|
||||
#define ATH_AIC_MAX_BT_CHANNEL 79
|
||||
|
||||
/* Defines the BT AR_BT_COEX_WGHT used */
|
||||
enum ath_stomp_type {
|
||||
ATH_BTCOEX_STOMP_ALL,
|
||||
|
@ -93,9 +96,18 @@ struct ath9k_hw_mci {
|
|||
u32 last_recovery;
|
||||
};
|
||||
|
||||
struct ath9k_hw_aic {
|
||||
bool aic_enabled;
|
||||
u8 aic_cal_state;
|
||||
u8 aic_caled_chan;
|
||||
u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL];
|
||||
u32 aic_cal_start_time;
|
||||
};
|
||||
|
||||
struct ath_btcoex_hw {
|
||||
enum ath_btcoex_scheme scheme;
|
||||
struct ath9k_hw_mci mci;
|
||||
struct ath9k_hw_aic aic;
|
||||
bool enabled;
|
||||
u8 wlanactive_gpio;
|
||||
u8 btactive_gpio;
|
||||
|
|
|
@ -238,7 +238,6 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
{
|
||||
struct ath9k_nfcal_hist *h = NULL;
|
||||
unsigned i, j;
|
||||
int32_t val;
|
||||
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
|
||||
|
@ -246,6 +245,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
if (ah->caldata)
|
||||
h = ah->caldata->nfCalHist;
|
||||
|
||||
ENABLE_REG_RMW_BUFFER(ah);
|
||||
for (i = 0; i < NUM_NF_READINGS; i++) {
|
||||
if (chainmask & (1 << i)) {
|
||||
s16 nfval;
|
||||
|
@ -258,10 +258,8 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
else
|
||||
nfval = default_nf;
|
||||
|
||||
val = REG_READ(ah, ah->nf_regs[i]);
|
||||
val &= 0xFFFFFE00;
|
||||
val |= (((u32) nfval << 1) & 0x1ff);
|
||||
REG_WRITE(ah, ah->nf_regs[i], val);
|
||||
REG_RMW(ah, ah->nf_regs[i],
|
||||
(((u32) nfval << 1) & 0x1ff), 0x1ff);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -274,6 +272,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
|
||||
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
|
||||
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
|
||||
REG_RMW_BUFFER_FLUSH(ah);
|
||||
|
||||
/*
|
||||
* Wait for load to complete, should be fast, a few 10s of us.
|
||||
|
@ -309,19 +308,17 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
* by the median we just loaded. This will be initial (and max) value
|
||||
* of next noise floor calibration the baseband does.
|
||||
*/
|
||||
ENABLE_REGWRITE_BUFFER(ah);
|
||||
ENABLE_REG_RMW_BUFFER(ah);
|
||||
for (i = 0; i < NUM_NF_READINGS; i++) {
|
||||
if (chainmask & (1 << i)) {
|
||||
if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
|
||||
continue;
|
||||
|
||||
val = REG_READ(ah, ah->nf_regs[i]);
|
||||
val &= 0xFFFFFE00;
|
||||
val |= (((u32) (-50) << 1) & 0x1ff);
|
||||
REG_WRITE(ah, ah->nf_regs[i], val);
|
||||
REG_RMW(ah, ah->nf_regs[i],
|
||||
(((u32) (-50) << 1) & 0x1ff), 0x1ff);
|
||||
}
|
||||
}
|
||||
REGWRITE_BUFFER_FLUSH(ah);
|
||||
REG_RMW_BUFFER_FLUSH(ah);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -126,8 +126,19 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
DFS_STAT_INC(sc, pulses_detected);
return true;
}
#undef PRI_CH_RADAR_FOUND
#undef EXT_CH_RADAR_FOUND

static void
ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
{
struct dfs_pattern_detector *pd = sc->dfs_detector;
DFS_STAT_INC(sc, pulses_processed);
if (pd == NULL)
return;
if (!pd->add_pulse(pd, pe))
return;
DFS_STAT_INC(sc, radar_detected);
ieee80211_radar_detected(sc->hw);
}

/*
* DFS: check PHY-error for radar pulse and feed the detector
@@ -176,18 +187,21 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
ard.pulse_length_pri = vdata_end[-3];
pe.freq = ah->curchan->channel;
pe.ts = mactime;
if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
struct dfs_pattern_detector *pd = sc->dfs_detector;
ath_dbg(common, DFS,
"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
pe.freq, pe.ts, pe.width, pe.rssi,
pe.ts - sc->dfs_prev_pulse_ts);
sc->dfs_prev_pulse_ts = pe.ts;
DFS_STAT_INC(sc, pulses_processed);
if (pd != NULL && pd->add_pulse(pd, &pe)) {
DFS_STAT_INC(sc, radar_detected);
ieee80211_radar_detected(sc->hw);
}
if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
return;

ath_dbg(common, DFS,
"ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi,
pe.ts - sc->dfs_prev_pulse_ts);
sc->dfs_prev_pulse_ts = pe.ts;
if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
ath9k_dfs_process_radar_pulse(sc, &pe);
if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
ath9k_dfs_process_radar_pulse(sc, &pe);
}
}
#undef PRI_CH_RADAR_FOUND
#undef EXT_CH_RADAR_FOUND

@@ -27,12 +27,7 @@ void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
u32 shift, u32 val)
{
u32 regVal;

regVal = REG_READ(ah, reg) & ~mask;
regVal |= (val << shift) & mask;

REG_WRITE(ah, reg, regVal);
REG_RMW(ah, reg, ((val << shift) & mask), mask);

if (ah->config.analog_shiftreg)
udelay(100);

@@ -389,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
}
}

ENABLE_REG_RMW_BUFFER(ah);
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
(numXpdGain - 1) & 0x3);
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
@@ -396,6 +397,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
xpdGainValues[1]);
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
REG_RMW_BUFFER_FLUSH(ah);

for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
regChainOffset = i * 0x1000;
@@ -770,15 +772,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
struct ar5416_eeprom_4k *eep,
u8 txRxAttenLocal)
{
REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0,
pModal->antCtrlChain[0]);
ENABLE_REG_RMW_BUFFER(ah);
REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
pModal->antCtrlChain[0], 0);

REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0),
(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);

if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
AR5416_EEP_MINOR_VER_3) {
@@ -817,6 +818,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
REG_RMW_BUFFER_FLUSH(ah);
}

/*
@@ -928,6 +930,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
}
}

ENABLE_REG_RMW_BUFFER(ah);
if (AR_SREV_9271(ah)) {
ath9k_hw_analog_shift_rmw(ah,
AR9285_AN_RF2G3,
@@ -1032,18 +1035,19 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
AR9285_AN_RF2G4_DB2_4_S,
db2[4]);
}
REG_RMW_BUFFER_FLUSH(ah);


ENABLE_REG_RMW_BUFFER(ah);
REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
pModal->switchSettling);
REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
pModal->adcDesiredSize);

REG_WRITE(ah, AR_PHY_RF_CTL4,
SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
REG_RMW(ah, AR_PHY_RF_CTL4,
SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0);

REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
pModal->txEndToRxOn);
@@ -1072,6 +1076,8 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
pModal->swSettleHt40);
}

REG_RMW_BUFFER_FLUSH(ah);

bb_desired_scale = (pModal->bb_scale_smrt_antenna &
EEP_4K_BB_DESIRED_SCALE_MASK);
if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
@@ -1080,6 +1086,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
pwrctrl = mask * bb_desired_scale;
clr = mask * 0x1f;
ENABLE_REG_RMW_BUFFER(ah);
REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
@@ -1094,6 +1101,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
clr = mask * 0x1f;
REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
REG_RMW_BUFFER_FLUSH(ah);
}
}

@@ -466,6 +466,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
struct ar5416_eeprom_def *eep,
u8 txRxAttenLocal, int regChainOffset, int i)
{
ENABLE_REG_RMW_BUFFER(ah);
if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
txRxAttenLocal = pModal->txRxAttenCh[i];

@@ -483,16 +484,12 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
AR_PHY_GAIN_2GHZ_XATTEN2_DB,
pModal->xatten2Db[i]);
} else {
REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
(REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
| SM(pModal-> bswMargin[i],
AR_PHY_GAIN_2GHZ_BSW_MARGIN));
REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
(REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
| SM(pModal->bswAtten[i],
AR_PHY_GAIN_2GHZ_BSW_ATTEN));
REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
SM(pModal-> bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN),
AR_PHY_GAIN_2GHZ_BSW_MARGIN);
REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN),
AR_PHY_GAIN_2GHZ_BSW_ATTEN);
}
}

@@ -504,17 +501,14 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
AR_PHY_RXGAIN + regChainOffset,
AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
} else {
REG_WRITE(ah,
AR_PHY_RXGAIN + regChainOffset,
(REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
~AR_PHY_RXGAIN_TXRX_ATTEN)
| SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
REG_WRITE(ah,
AR_PHY_GAIN_2GHZ + regChainOffset,
(REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset,
SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN),
AR_PHY_RXGAIN_TXRX_ATTEN);
REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN),
AR_PHY_GAIN_2GHZ_RXTX_MARGIN);
}
REG_RMW_BUFFER_FLUSH(ah);
}

static void ath9k_hw_def_set_board_values(struct ath_hw *ah,

@@ -444,6 +444,10 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
#define OP_BT_SCAN BIT(4)
#define OP_TSF_RESET BIT(6)

enum htc_op_flags {
HTC_FWFLAG_NO_RMW,
};

struct ath9k_htc_priv {
struct device *dev;
struct ieee80211_hw *hw;
@@ -482,6 +486,7 @@ struct ath9k_htc_priv {
bool reconfig_beacon;
unsigned int rxfilter;
unsigned long op_flags;
unsigned long fw_flags;

struct ath9k_hw_cal_data caldata;
struct ath_spec_scan_priv spec_priv;

@@ -376,15 +376,137 @@ static void ath9k_regwrite_flush(void *hw_priv)
mutex_unlock(&priv->wmi->multi_write_mutex);
}

static void ath9k_reg_rmw_buffer(void *hw_priv,
u32 reg_offset, u32 set, u32 clr)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
u32 rsp_status;
int r;

mutex_lock(&priv->wmi->multi_rmw_mutex);

/* Store the register/value */
priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg =
cpu_to_be32(reg_offset);
priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set =
cpu_to_be32(set);
priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr =
cpu_to_be32(clr);

priv->wmi->multi_rmw_idx++;

/* If the buffer is full, send it out. */
if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) {
r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
(u8 *) &priv->wmi->multi_rmw,
sizeof(struct register_write) * priv->wmi->multi_rmw_idx,
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
ath_dbg(common, WMI,
"REGISTER RMW FAILED, multi len: %d\n",
priv->wmi->multi_rmw_idx);
}
priv->wmi->multi_rmw_idx = 0;
}

mutex_unlock(&priv->wmi->multi_rmw_mutex);
}

static void ath9k_reg_rmw_flush(void *hw_priv)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
u32 rsp_status;
int r;

if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
return;

atomic_dec(&priv->wmi->m_rmw_cnt);

mutex_lock(&priv->wmi->multi_rmw_mutex);

if (priv->wmi->multi_rmw_idx) {
r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
(u8 *) &priv->wmi->multi_rmw,
sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
ath_dbg(common, WMI,
"REGISTER RMW FAILED, multi len: %d\n",
priv->wmi->multi_rmw_idx);
}
priv->wmi->multi_rmw_idx = 0;
}

mutex_unlock(&priv->wmi->multi_rmw_mutex);
}

static void ath9k_enable_rmw_buffer(void *hw_priv)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;

if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
return;

atomic_inc(&priv->wmi->m_rmw_cnt);
}

static u32 ath9k_reg_rmw_single(void *hw_priv,
u32 reg_offset, u32 set, u32 clr)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
struct register_rmw buf, buf_ret;
int ret;
u32 val = 0;

buf.reg = cpu_to_be32(reg_offset);
buf.set = cpu_to_be32(set);
buf.clr = cpu_to_be32(clr);

ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
(u8 *) &buf, sizeof(buf),
(u8 *) &buf_ret, sizeof(buf_ret),
100);
if (unlikely(ret)) {
ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n",
reg_offset, ret);
}
return val;
}

static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
u32 val;
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;

val = ath9k_regread(hw_priv, reg_offset);
val &= ~clr;
val |= set;
ath9k_regwrite(hw_priv, val, reg_offset);
return val;
if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
u32 val;

val = REG_READ(ah, reg_offset);
val &= ~clr;
val |= set;
REG_WRITE(ah, reg_offset, val);

return 0;
}

if (atomic_read(&priv->wmi->m_rmw_cnt))
ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr);
else
ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr);

return 0;
}

static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
@@ -501,6 +623,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
ah->reg_ops.write = ath9k_regwrite;
ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
ah->reg_ops.write_flush = ath9k_regwrite_flush;
ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer;
ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush;
ah->reg_ops.rmw = ath9k_reg_rmw;
priv->ah = ah;

@@ -686,6 +810,12 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
return -EINVAL;
}

if (priv->fw_version_major == 1 && priv->fw_version_minor < 4)
set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags);

dev_info(priv->dev, "FW RMW support: %s\n",
test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On");

return 0;
}

@@ -108,6 +108,14 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
}

static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah)
{
if (ath9k_hw_private_ops(ah)->is_aic_enabled)
return ath9k_hw_private_ops(ah)->is_aic_enabled(ah);

return false;
}

#endif

/* Private hardware call ops */

@@ -121,6 +121,36 @@ void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
REGWRITE_BUFFER_FLUSH(ah);
}

void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size)
{
u32 *tmp_reg_list, *tmp_data;
int i;

tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
if (!tmp_reg_list) {
dev_err(ah->dev, "%s: tmp_reg_list: alloc filed\n", __func__);
return;
}

tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
if (!tmp_data) {
dev_err(ah->dev, "%s tmp_data: alloc filed\n", __func__);
goto error_tmp_data;
}

for (i = 0; i < size; i++)
tmp_reg_list[i] = array[i][0];

REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size);

for (i = 0; i < size; i++)
array[i][1] = tmp_data[i];

kfree(tmp_data);
error_tmp_data:
kfree(tmp_reg_list);
}

u32 ath9k_hw_reverse_bits(u32 val, u32 n)
{
u32 retval;
@@ -366,6 +396,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rimt_first = 700;
}

if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
ah->config.pll_pwrsave = 7;

/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
@@ -1197,6 +1230,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
u32 set = AR_STA_ID1_KSRCH_MODE;

ENABLE_REG_RMW_BUFFER(ah);
switch (opmode) {
case NL80211_IFTYPE_ADHOC:
if (!AR_SREV_9340_13(ah)) {
@@ -1218,6 +1252,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
break;
}
REG_RMW(ah, AR_STA_ID1, set, mask);
REG_RMW_BUFFER_FLUSH(ah);
}

void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1930,6 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_OBS, 8);

ENABLE_REG_RMW_BUFFER(ah);
if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
@@ -1939,6 +1975,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
}
REG_RMW_BUFFER_FLUSH(ah);

ath9k_hw_init_bb(ah, chan);

@@ -100,6 +100,18 @@
(_ah)->reg_ops.write_flush((_ah)); \
} while (0)

#define ENABLE_REG_RMW_BUFFER(_ah) \
do { \
if ((_ah)->reg_ops.enable_rmw_buffer) \
(_ah)->reg_ops.enable_rmw_buffer((_ah)); \
} while (0)

#define REG_RMW_BUFFER_FLUSH(_ah) \
do { \
if ((_ah)->reg_ops.rmw_flush) \
(_ah)->reg_ops.rmw_flush((_ah)); \
} while (0)

#define PR_EEP(_s, _val) \
do { \
len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
@@ -126,6 +138,8 @@

#define REG_WRITE_ARRAY(iniarray, column, regWr) \
ath9k_hw_write_array(ah, iniarray, column, &(regWr))
#define REG_READ_ARRAY(ah, array, size) \
ath9k_hw_read_array(ah, array, size)

#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
@@ -309,6 +323,12 @@ enum ath9k_hw_hang_checks {
HW_MAC_HANG = BIT(5),
};

#define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0)
#define AR_PCIE_PLL_PWRSAVE_ON_D3 BIT(1)
#define AR_PCIE_PLL_PWRSAVE_ON_D0 BIT(2)
#define AR_PCIE_CDR_PWRSAVE_ON_D3 BIT(3)
#define AR_PCIE_CDR_PWRSAVE_ON_D0 BIT(4)

struct ath9k_ops_config {
int dma_beacon_response_time;
int sw_beacon_response_time;
@@ -335,7 +355,7 @@ struct ath9k_ops_config {
u32 ant_ctrl_comm2g_switch_enable;
bool xatten_margin_cfg;
bool alt_mingainidx;
bool no_pll_pwrsave;
u8 pll_pwrsave;
bool tx_gain_buffalo;
bool led_active_high;
};
@@ -647,6 +667,10 @@ struct ath_hw_private_ops {

/* ANI */
void (*ani_cache_ini_regs)(struct ath_hw *ah);

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
bool (*is_aic_enabled)(struct ath_hw *ah);
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
};

/**
@@ -1008,6 +1032,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
int column, unsigned int *writecnt);
void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size);
u32 ath9k_hw_reverse_bits(u32 val, u32 n);
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
u8 phy, int kbps,
@@ -1117,6 +1142,7 @@ void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us);
void ath9k_hw_setslottime(struct ath_hw *ah, u32 us);

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
void ar9003_hw_attach_aic_ops(struct ath_hw *ah);
static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return ah->btcoex_hw.enabled;
@@ -1134,6 +1160,9 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
return ah->btcoex_hw.scheme;
}
#else
static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
{
}
static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return false;

@@ -141,6 +141,16 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
return val;
}

static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
u32 *val, u16 count)
{
int i;

for (i = 0; i < count; i++)
val[i] = ath9k_ioread32(hw_priv, addr[i]);
}


static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
u32 set, u32 clr)
{
@@ -437,8 +447,15 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
ath_info(common, "Enable WAR for ASPM D3/L1\n");
}

/*
* The default value of pll_pwrsave is 1.
* For certain AR9485 cards, it is set to 0.
* For AR9462, AR9565 it's set to 7.
*/
ah->config.pll_pwrsave = 1;

if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
ah->config.no_pll_pwrsave = true;
ah->config.pll_pwrsave = 0;
ath_info(common, "Disable PLL PowerSave\n");
}

@@ -530,6 +547,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.multi_read = ath9k_multi_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
pCap = &ah->caps;
@@ -763,7 +781,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20),
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40),
}
#endif
};

@@ -0,0 +1,168 @@
/*
* Copyright (c) 2015 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#ifndef REG_AIC_H
#define REG_AIC_H

#define AR_SM_BASE 0xa200
#define AR_SM1_BASE 0xb200
#define AR_AGC_BASE 0x9e00

#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)

#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)
#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)
#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)

#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)

#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + 0x4c4)
#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + 0x4c8)
#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)

#define AR_PHY_AIC_SRAM_ADDR_B0 (AR_SM_BASE + 0x5f0)
#define AR_PHY_AIC_SRAM_DATA_B0 (AR_SM_BASE + 0x5f4)

#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)

#define AR_PHY_BT_COEX_4 (AR_AGC_BASE + 0x60)
#define AR_PHY_BT_COEX_5 (AR_AGC_BASE + 0x64)

/* AIC fields */
#define AR_PHY_AIC_MON_ENABLE 0x80000000
#define AR_PHY_AIC_MON_ENABLE_S 31
#define AR_PHY_AIC_CAL_MAX_HOP_COUNT 0x7F000000
#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S 24
#define AR_PHY_AIC_CAL_MIN_VALID_COUNT 0x00FE0000
#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S 17
#define AR_PHY_AIC_F_WLAN 0x0001FC00
#define AR_PHY_AIC_F_WLAN_S 10
#define AR_PHY_AIC_CAL_CH_VALID_RESET 0x00000200
#define AR_PHY_AIC_CAL_CH_VALID_RESET_S 9
#define AR_PHY_AIC_CAL_ENABLE 0x00000100
#define AR_PHY_AIC_CAL_ENABLE_S 8
#define AR_PHY_AIC_BTTX_PWR_THR 0x000000FE
#define AR_PHY_AIC_BTTX_PWR_THR_S 1
#define AR_PHY_AIC_ENABLE 0x00000001
#define AR_PHY_AIC_ENABLE_S 0
#define AR_PHY_AIC_CAL_BT_REF_DELAY 0x00F00000
#define AR_PHY_AIC_CAL_BT_REF_DELAY_S 20
#define AR_PHY_AIC_BT_IDLE_CFG 0x00080000
#define AR_PHY_AIC_BT_IDLE_CFG_S 19
#define AR_PHY_AIC_STDBY_COND 0x00060000
#define AR_PHY_AIC_STDBY_COND_S 17
#define AR_PHY_AIC_STDBY_ROT_ATT_DB 0x0001F800
#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S 11
#define AR_PHY_AIC_STDBY_COM_ATT_DB 0x00000700
#define AR_PHY_AIC_STDBY_COM_ATT_DB_S 8
#define AR_PHY_AIC_RSSI_MAX 0x000000F0
#define AR_PHY_AIC_RSSI_MAX_S 4
#define AR_PHY_AIC_RSSI_MIN 0x0000000F
#define AR_PHY_AIC_RSSI_MIN_S 0
#define AR_PHY_AIC_RADIO_DELAY 0x7F000000
#define AR_PHY_AIC_RADIO_DELAY_S 24
#define AR_PHY_AIC_CAL_STEP_SIZE_CORR 0x00F00000
#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S 20
#define AR_PHY_AIC_CAL_ROT_IDX_CORR 0x000F8000
#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S 15
#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR 0x00006000
#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S 13
#define AR_PHY_AIC_ROT_IDX_COUNT_MAX 0x00001C00
#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S 10
#define AR_PHY_AIC_CAL_SYNTH_TOGGLE 0x00000200
#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S 9
#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX 0x00000100
#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S 8
#define AR_PHY_AIC_CAL_SYNTH_SETTLING 0x000000FF
#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S 0
#define AR_PHY_AIC_MON_MAX_HOP_COUNT 0x07F00000
#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S 20
#define AR_PHY_AIC_MON_MIN_STALE_COUNT 0x000FE000
#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S 13
#define AR_PHY_AIC_MON_PWR_EST_LONG 0x00001000
#define AR_PHY_AIC_MON_PWR_EST_LONG_S 12
#define AR_PHY_AIC_MON_PD_TALLY_SCALING 0x00000C00
#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S 10
#define AR_PHY_AIC_MON_PERF_THR 0x000003E0
#define AR_PHY_AIC_MON_PERF_THR_S 5
#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING 0x00000018
#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S 3
#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR 0x00000006
#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S 1
#define AR_PHY_AIC_CAL_PWR_EST_LONG 0x00000001
#define AR_PHY_AIC_CAL_PWR_EST_LONG_S 0
#define AR_PHY_AIC_MON_DONE 0x80000000
#define AR_PHY_AIC_MON_DONE_S 31
#define AR_PHY_AIC_MON_ACTIVE 0x40000000
#define AR_PHY_AIC_MON_ACTIVE_S 30
#define AR_PHY_AIC_MEAS_COUNT 0x3F000000
#define AR_PHY_AIC_MEAS_COUNT_S 24
#define AR_PHY_AIC_CAL_ANT_ISO_EST 0x00FC0000
#define AR_PHY_AIC_CAL_ANT_ISO_EST_S 18
#define AR_PHY_AIC_CAL_HOP_COUNT 0x0003F800
#define AR_PHY_AIC_CAL_HOP_COUNT_S 11
#define AR_PHY_AIC_CAL_VALID_COUNT 0x000007F0
#define AR_PHY_AIC_CAL_VALID_COUNT_S 4
#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR 0x00000008
#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S 3
#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR 0x00000004
#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S 2
#define AR_PHY_AIC_CAL_DONE 0x00000002
#define AR_PHY_AIC_CAL_DONE_S 1
#define AR_PHY_AIC_CAL_ACTIVE 0x00000001
#define AR_PHY_AIC_CAL_ACTIVE_S 0

#define AR_PHY_AIC_MEAS_MAG_MIN 0xFFC00000
#define AR_PHY_AIC_MEAS_MAG_MIN_S 22
#define AR_PHY_AIC_MON_STALE_COUNT 0x003F8000
#define AR_PHY_AIC_MON_STALE_COUNT_S 15
#define AR_PHY_AIC_MON_HOP_COUNT 0x00007F00
#define AR_PHY_AIC_MON_HOP_COUNT_S 8
#define AR_PHY_AIC_CAL_AIC_SM 0x000000F8
#define AR_PHY_AIC_CAL_AIC_SM_S 3
#define AR_PHY_AIC_SM 0x00000007
#define AR_PHY_AIC_SM_S 0
#define AR_PHY_AIC_SRAM_VALID 0x00000001
#define AR_PHY_AIC_SRAM_VALID_S 0
#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB 0x0000007E
#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S 1
#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN 0x00000080
#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S 7
#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB 0x00003F00
#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S 8
#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN 0x00004000
#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S 14
#define AR_PHY_AIC_SRAM_COM_ATT_6DB 0x00038000
#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S 15
#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO 0x0000E000
#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S 13
#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO 0x00001E00
#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S 9
#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING 0x000001F8
#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S 3
#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF 0x00000006
#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S 1
#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED 0x00000001
#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S 0

#endif /* REG_AIC_H */

@@ -61,6 +61,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
return "WMI_REG_READ_CMDID";
case WMI_REG_WRITE_CMDID:
return "WMI_REG_WRITE_CMDID";
case WMI_REG_RMW_CMDID:
return "WMI_REG_RMW_CMDID";
case WMI_RC_STATE_CHANGE_CMDID:
return "WMI_RC_STATE_CHANGE_CMDID";
case WMI_RC_RATE_UPDATE_CMDID:
@@ -101,6 +103,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
spin_lock_init(&wmi->event_lock);
mutex_init(&wmi->op_mutex);
mutex_init(&wmi->multi_write_mutex);
mutex_init(&wmi->multi_rmw_mutex);
init_completion(&wmi->cmd_wait);
INIT_LIST_HEAD(&wmi->pending_tx_events);
tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,

@@ -112,6 +112,7 @@ enum wmi_cmd_id {
WMI_TX_STATS_CMDID,
WMI_RX_STATS_CMDID,
WMI_BITRATE_MASK_CMDID,
WMI_REG_RMW_CMDID,
};

enum wmi_event_id {
@@ -125,12 +126,19 @@ enum wmi_event_id {
};

#define MAX_CMD_NUMBER 62
#define MAX_RMW_CMD_NUMBER 15

struct register_write {
__be32 reg;
__be32 val;
};

struct register_rmw {
__be32 reg;
__be32 set;
__be32 clr;
} __packed;

struct ath9k_htc_tx_event {
int count;
struct __wmi_event_txstatus txs;
@@ -156,10 +164,18 @@ struct wmi {

spinlock_t wmi_lock;

/* multi write section */
atomic_t mwrite_cnt;
struct register_write multi_write[MAX_CMD_NUMBER];
u32 multi_write_idx;
struct mutex multi_write_mutex;

/* multi rmw section */
atomic_t m_rmw_cnt;
struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER];
u32 multi_rmw_idx;
struct mutex multi_rmw_mutex;

};

struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);

@@ -289,7 +289,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
"count=%d, count_false=%d\n",
event->freq, pd->rs->type_id,
ps->pri, ps->count, ps->count_falses);
channel_detector_reset(dpd, cd);
pd->reset(pd, dpd->last_pulse_ts);
return true;
}
}

@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#include <linux/etherdevice.h>
#include "wil6210.h"
#include "wmi.h"

@@ -217,7 +218,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
if (cid < 0)
return -ENOENT;

memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
ether_addr_copy(mac, wil->sta[cid].addr);
wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);

rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -478,8 +479,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
conn.channel = ch - 1;

memcpy(conn.bssid, bss->bssid, ETH_ALEN);
memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
ether_addr_copy(conn.bssid, bss->bssid);
ether_addr_copy(conn.dst_mac, bss->bssid);

set_bit(wil_status_fwconnecting, wil->status);

@@ -782,8 +783,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value);
if (rc)
netif_carrier_off(ndev);
goto err_pcp_start;

rc = wil_bcast_init(wil);
if (rc)
goto err_bcast;

goto out; /* success */
err_bcast:
wmi_pcp_stop(wil);
err_pcp_start:
netif_carrier_off(ndev);
out:
mutex_unlock(&wil->mutex);
return rc;
@@ -917,6 +927,21 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
return 0;
}

static int wil_cfg80211_change_bss(struct wiphy *wiphy,
struct net_device *dev,
struct bss_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);

if (params->ap_isolate >= 0) {
wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
wil->ap_isolate, params->ap_isolate);
wil->ap_isolate = params->ap_isolate;
}

return 0;
}

static struct cfg80211_ops wil_cfg80211_ops = {
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
@@ -937,6 +962,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.stop_ap = wil_cfg80211_stop_ap,
.del_station = wil_cfg80211_del_station,
.probe_client = wil_cfg80211_probe_client,
.change_bss = wil_cfg80211_change_bss,
};

static void wil_wiphy_init(struct wiphy *wiphy)

@@ -121,12 +121,18 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)

snprintf(name, sizeof(name), "tx_%2d", i);

seq_printf(s,
"\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
txdata->agg_wsize, txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
if (cid < WIL6210_MAX_CID)
seq_printf(s,
"\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
txdata->agg_wsize,
txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
else
seq_printf(s,
"\nBroadcast [%3d|%3d] idle %s\n",
used, avail, sidle);

wil_print_vring(s, wil, name, vring, '_', 'H');
}
@@ -1405,6 +1411,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
{},
};

@@ -68,6 +68,7 @@ MODULE_PARM_DESC(mtu_max, " Max MTU value.");

static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;

static int ring_order_set(const char *val, const struct kernel_param *kp)
{
@@ -216,6 +217,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
wil_bcast_fini(wil);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);

@@ -360,6 +362,35 @@ static int wil_find_free_vring(struct wil6210_priv *wil)
return -EINVAL;
}

int wil_bcast_init(struct wil6210_priv *wil)
{
int ri = wil->bcast_vring, rc;

if ((ri >= 0) && wil->vring_tx[ri].va)
return 0;

ri = wil_find_free_vring(wil);
if (ri < 0)
return ri;

rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
if (rc == 0)
wil->bcast_vring = ri;

return rc;
}

void wil_bcast_fini(struct wil6210_priv *wil)
{
int ri = wil->bcast_vring;

if (ri < 0)
return;

wil->bcast_vring = -1;
wil_vring_fini_tx(wil, ri);
}

static void wil_connect_worker(struct work_struct *work)
{
int rc;
@@ -407,6 +438,7 @@ int wil_priv_init(struct wil6210_priv *wil)
init_completion(&wil->wmi_call);

wil->pending_connect_cid = -1;
wil->bcast_vring = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);

@@ -656,6 +688,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)

cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);

/* prevent NAPI from being scheduled */
bitmap_zero(wil->status, wil_status_last);
@@ -714,6 +747,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)

/* init after reset */
wil->pending_connect_cid = -1;
wil->ap_isolate = 0;
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);

@@ -723,6 +757,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)

/* we just started MAC, wait for FW ready */
rc = wil_wait_for_fw_ready(wil);
if (rc == 0) /* check FW is responsive */
rc = wmi_echo(wil);
}

return rc;

@@ -82,7 +82,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
wil_rx_handle(wil, &quota);
done = budget - quota;

if (done <= 1) { /* burst ends - only one packet processed */
if (done < budget) {
napi_complete(napi);
wil6210_unmask_irq_rx(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
@@ -110,7 +110,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
tx_done += wil_tx_complete(wil, i);
}

if (tx_done <= 1) { /* burst ends - only one packet processed */
if (tx_done < budget) {
napi_complete(napi);
wil6210_unmask_irq_tx(wil);
wil_dbg_txrx(wil, "NAPI TX complete\n");

@@ -246,8 +246,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)

wil6210_debugfs_init(wil);

/* check FW is alive */
wmi_echo(wil);

return 0;

@ -33,6 +33,15 @@ module_param(rtap_include_phy_info, bool, S_IRUGO);
|
|||
MODULE_PARM_DESC(rtap_include_phy_info,
|
||||
" Include PHY info in the radiotap header, default - no");
|
||||
|
||||
bool rx_align_2;
|
||||
module_param(rx_align_2, bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
|
||||
|
||||
static inline uint wil_rx_snaplen(void)
|
||||
{
|
||||
return rx_align_2 ? 6 : 0;
|
||||
}
|
||||
|
||||
static inline int wil_vring_is_empty(struct vring *vring)
|
||||
{
|
||||
return vring->swhead == vring->swtail;
|
||||
|
@ -209,7 +218,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
|
|||
u32 i, int headroom)
|
||||
{
|
||||
struct device *dev = wil_to_dev(wil);
|
||||
unsigned int sz = mtu_max + ETH_HLEN;
|
||||
unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
|
||||
struct vring_rx_desc dd, *d = ⅆ
|
||||
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
|
||||
dma_addr_t pa;
|
||||
|
@ -365,10 +374,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
|
|||
struct vring_rx_desc *d;
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t pa;
|
||||
unsigned int sz = mtu_max + ETH_HLEN;
|
||||
unsigned int snaplen = wil_rx_snaplen();
|
||||
unsigned int sz = mtu_max + ETH_HLEN + snaplen;
|
||||
u16 dmalen;
|
||||
u8 ftype;
|
||||
int cid;
|
||||
int i = (int)vring->swhead;
|
||||
struct wil_net_stats *stats;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
|
||||
|
@ -376,24 +387,28 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
|
|||
if (unlikely(wil_vring_is_empty(vring)))
|
||||
return NULL;
|
||||
|
||||
_d = &vring->va[vring->swhead].rx;
|
||||
_d = &vring->va[i].rx;
|
||||
if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
|
||||
/* it is not error, we just reached end of Rx done area */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
skb = vring->ctx[vring->swhead].skb;
|
||||
skb = vring->ctx[i].skb;
|
||||
vring->ctx[i].skb = NULL;
|
||||
wil_vring_advance_head(vring, 1);
|
||||
if (!skb) {
|
||||
wil_err(wil, "No Rx skb at [%d]\n", i);
|
||||
return NULL;
|
||||
}
|
||||
d = wil_skb_rxdesc(skb);
|
||||
*d = *_d;
|
||||
pa = wil_desc_addr(&d->dma.addr);
|
||||
vring->ctx[vring->swhead].skb = NULL;
|
||||
wil_vring_advance_head(vring, 1);
|
||||
|
||||
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
|
||||
dmalen = le16_to_cpu(d->dma.length);
|
||||
|
||||
trace_wil6210_rx(vring->swhead, d);
|
||||
wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
|
||||
trace_wil6210_rx(i, d);
|
||||
wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
|
||||
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
|
||||
(const void *)d, sizeof(*d), false);
|
||||
|
||||
|
@ -433,7 +448,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (unlikely(skb->len < ETH_HLEN)) {
|
||||
if (unlikely(skb->len < ETH_HLEN + snaplen)) {
|
||||
wil_err(wil, "Short frame, len = %d\n", skb->len);
|
||||
/* TODO: process it (i.e. BAR) */
|
||||
kfree_skb(skb);
|
||||
|
@ -455,6 +470,17 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
|
|||
*/
|
||||
}
|
||||
|
||||
if (snaplen) {
|
||||
/* Packet layout
|
||||
* +-------+-------+---------+------------+------+
|
||||
* | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
|
||||
* +-------+-------+---------+------------+------+
|
||||
* Need to remove SNAP, shifting SA and DA forward
|
||||
*/
|
||||
memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
|
||||
skb_pull(skb, snaplen);
|
||||
}
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
|
@ -492,17 +518,71 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
|
|||
*/
|
||||
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
|
||||
{
|
||||
gro_result_t rc;
|
||||
gro_result_t rc = GRO_NORMAL;
|
||||
struct wil6210_priv *wil = ndev_to_wil(ndev);
|
||||
struct wireless_dev *wdev = wil_to_wdev(wil);
|
||||
unsigned int len = skb->len;
|
||||
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
|
||||
int cid = wil_rxdesc_cid(d);
|
||||
int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
|
||||
struct ethhdr *eth = (void *)skb->data;
|
||||
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
|
||||
* is not suitable, need to look at data
|
||||
*/
|
||||
int mcast = is_multicast_ether_addr(eth->h_dest);
|
||||
struct wil_net_stats *stats = &wil->sta[cid].stats;
|
||||
struct sk_buff *xmit_skb = NULL;
|
||||
static const char * const gro_res_str[] = {
|
||||
[GRO_MERGED] = "GRO_MERGED",
|
||||
[GRO_MERGED_FREE] = "GRO_MERGED_FREE",
|
||||
[GRO_HELD] = "GRO_HELD",
|
||||
[GRO_NORMAL] = "GRO_NORMAL",
|
||||
[GRO_DROP] = "GRO_DROP",
|
||||
};
|
||||
|
||||
skb_orphan(skb);
|
||||
|
||||
rc = napi_gro_receive(&wil->napi_rx, skb);
|
||||
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
|
||||
if (mcast) {
|
||||
/* send multicast frames both to higher layers in
|
||||
* local net stack and back to the wireless medium
|
||||
*/
|
||||
xmit_skb = skb_copy(skb, GFP_ATOMIC);
|
||||
} else {
|
||||
int xmit_cid = wil_find_cid(wil, eth->h_dest);
|
||||
|
||||
if (xmit_cid >= 0) {
|
||||
/* The destination station is associated to
|
||||
* this AP (in this VLAN), so send the frame
|
||||
* directly to it and do not pass it to local
|
||||
* net stack.
|
||||
*/
|
||||
xmit_skb = skb;
|
||||
skb = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (xmit_skb) {
|
||||
/* Send to wireless media and increase priority by 256 to
|
||||
* keep the received priority instead of reclassifying
|
||||
* the frame (see cfg80211_classify8021d).
|
||||
*/
|
||||
xmit_skb->dev = ndev;
|
||||
xmit_skb->priority += 256;
|
||||
xmit_skb->protocol = htons(ETH_P_802_3);
|
||||
skb_reset_network_header(xmit_skb);
|
||||
skb_reset_mac_header(xmit_skb);
|
||||
wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
|
||||
dev_queue_xmit(xmit_skb);
|
||||
}
|
||||
|
||||
if (skb) { /* deliver to local stack */
|
||||
|
||||
skb->protocol = eth_type_trans(skb, ndev);
|
||||
rc = napi_gro_receive(&wil->napi_rx, skb);
|
||||
wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
|
||||
len, gro_res_str[rc]);
|
||||
}
|
||||
/* statistics. rc set to GRO_NORMAL for AP bridging */
|
||||
if (unlikely(rc == GRO_DROP)) {
|
||||
ndev->stats.rx_dropped++;
|
||||
stats->rx_dropped++;
|
||||
|
@ -512,17 +592,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
|
|||
stats->rx_packets++;
|
||||
ndev->stats.rx_bytes += len;
|
||||
stats->rx_bytes += len;
|
||||
}
|
||||
{
|
||||
static const char * const gro_res_str[] = {
|
||||
[GRO_MERGED] = "GRO_MERGED",
|
||||
[GRO_MERGED_FREE] = "GRO_MERGED_FREE",
|
||||
[GRO_HELD] = "GRO_HELD",
|
||||
[GRO_NORMAL] = "GRO_NORMAL",
|
||||
[GRO_DROP] = "GRO_DROP",
|
||||
};
|
||||
wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
|
||||
len, gro_res_str[rc]);
|
||||
if (mcast)
|
||||
ndev->stats.multicast++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -553,7 +624,6 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
|
|||
skb->protocol = htons(ETH_P_802_2);
|
||||
wil_netif_rx_any(skb, ndev);
|
||||
} else {
|
||||
skb->protocol = eth_type_trans(skb, ndev);
|
||||
wil_rx_reorder(wil, skb);
|
||||
}
|
||||
}
|
||||
|
@ -679,6 +749,72 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
|
|||
return rc;
|
||||
}
|
||||
|
||||
int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
|
||||
{
|
||||
int rc;
|
||||
struct wmi_bcast_vring_cfg_cmd cmd = {
|
||||
.action = cpu_to_le32(WMI_VRING_CMD_ADD),
|
||||
.vring_cfg = {
|
||||
.tx_sw_ring = {
|
||||
.max_mpdu_size =
|
||||
cpu_to_le16(wil_mtu2macbuf(mtu_max)),
|
||||
.ring_size = cpu_to_le16(size),
|
||||
},
|
||||
.ringid = id,
|
||||
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
|
||||
},
|
||||
};
|
||||
struct {
|
||||
struct wil6210_mbox_hdr_wmi wmi;
|
||||
struct wmi_vring_cfg_done_event cmd;
|
||||
} __packed reply;
|
||||
struct vring *vring = &wil->vring_tx[id];
|
||||
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
|
||||
|
||||
wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
|
||||
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
|
||||
|
||||
if (vring->va) {
|
||||
wil_err(wil, "Tx ring [%d] already allocated\n", id);
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(txdata, 0, sizeof(*txdata));
|
||||
spin_lock_init(&txdata->lock);
|
||||
vring->size = size;
|
||||
rc = wil_vring_alloc(wil, vring);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
|
||||
wil->vring2cid_tid[id][1] = 0; /* TID */
|
||||
|
||||
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
|
||||
|
||||
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
|
||||
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
|
||||
if (rc)
|
||||
goto out_free;
|
||||
|
||||
if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
|
||||
wil_err(wil, "Tx config failed, status 0x%02x\n",
|
||||
reply.cmd.status);
|
||||
rc = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
|
||||
|
||||
txdata->enabled = 1;
|
||||
|
||||
return 0;
|
||||
out_free:
|
||||
wil_vring_free(wil, vring, 1);
|
||||
out:
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
|
||||
{
|
||||
struct vring *vring = &wil->vring_tx[id];
|
||||
|
@ -702,7 +838,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
|
|||
memset(txdata, 0, sizeof(*txdata));
|
||||
}
|
||||
|
||||
static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
|
||||
static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int i;
|
||||
|
@ -735,15 +871,6 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void wil_set_da_for_vring(struct wil6210_priv *wil,
|
||||
struct sk_buff *skb, int vring_index)
|
||||
{
|
||||
struct ethhdr *eth = (void *)skb->data;
|
||||
int cid = wil->vring2cid_tid[vring_index][0];
|
||||
|
||||
memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
|
||||
}
|
||||
|
||||
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
struct sk_buff *skb);
|
||||
|
||||
|
@ -764,6 +891,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
|
|||
continue;
|
||||
|
||||
cid = wil->vring2cid_tid[i][0];
|
||||
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
|
||||
continue;
|
||||
|
||||
if (!wil->sta[cid].data_port_open &&
|
||||
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
|
||||
break;
|
||||
|
@ -778,17 +908,51 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find 1-st vring and return it; set dest address for this vring in skb
|
||||
* duplicate skb and send it to other active vrings
|
||||
/* Use one of 2 strategies:
|
||||
*
|
||||
* 1. New (real broadcast):
|
||||
* use dedicated broadcast vring
|
||||
* 2. Old (pseudo-DMS):
|
||||
* Find 1-st vring and return it;
|
||||
* duplicate skb and send it to other active vrings;
|
||||
* in all cases override dest address to unicast peer's address
|
||||
* Use old strategy when new is not supported yet:
|
||||
* - for PBSS
|
||||
* - for secure link
|
||||
*/
|
||||
static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
|
||||
struct sk_buff *skb)
|
||||
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct vring *v;
|
||||
int i = wil->bcast_vring;
|
||||
|
||||
if (i < 0)
|
||||
return NULL;
|
||||
v = &wil->vring_tx[i];
|
||||
if (!v->va)
|
||||
return NULL;
|
||||
|
||||
return v;
|
||||
}
|
||||
|
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
struct ethhdr *eth = (void *)skb->data;
int cid = wil->vring2cid_tid[vring_index][0];

ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}

static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct vring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;

/* find 1-st vring eligible for data */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
@@ -797,9 +961,15 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
continue;

cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open)
continue;

/* don't Tx back to source when re-routing Rx->Tx at the AP */
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
continue;

goto found;
}

@@ -817,9 +987,14 @@ found:
if (!v2->va)
continue;
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open)
continue;

if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
continue;

skb2 = skb_copy(skb, GFP_ATOMIC);
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
@@ -833,6 +1008,20 @@ found:
return v;
}

static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct wireless_dev *wdev = wil->wdev;

if (wdev->iftype != NL80211_IFTYPE_AP)
return wil_find_tx_bcast_2(wil, skb);

if (wil->privacy)
return wil_find_tx_bcast_2(wil, skb);

return wil_find_tx_bcast_1(wil, skb);
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -925,6 +1114,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
uint i = swhead;
dma_addr_t pa;
int used;
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);

wil_dbg_txrx(wil, "%s()\n", __func__);

@@ -950,7 +1141,17 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return -EINVAL;
vring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
wil_tx_desc_map(d, pa, len, vring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
/* set MCS 1 */
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
/* packet mode 2 */
d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
(2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
}
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
@@ -1056,6 +1257,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct ethhdr *eth = (void *)skb->data;
bool bcast = is_multicast_ether_addr(eth->h_dest);
struct vring *vring;
static bool pr_once_fw;
int rc;
@@ -1083,10 +1285,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* in STA mode (ESS), all to same VRING */
vring = wil_find_tx_vring_sta(wil, skb);
} else { /* direct communication, find matching VRING */
if (is_unicast_ether_addr(eth->h_dest))
vring = wil_find_tx_vring(wil, skb);
else
vring = wil_tx_bcast(wil, skb);
vring = bcast ? wil_find_tx_bcast(wil, skb) :
wil_find_tx_ucast(wil, skb);
}
if (unlikely(!vring)) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1149,7 +1349,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
int done = 0;
int cid = wil->vring2cid_tid[ringid][0];
struct wil_net_stats *stats = &wil->sta[cid].stats;
struct wil_net_stats *stats = NULL;
volatile struct vring_tx_desc *_d;
int used_before_complete;
int used_new;
@@ -1168,6 +1368,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)

used_before_complete = wil_vring_used_tx(vring);

if (cid < WIL6210_MAX_CID)
stats = &wil->sta[cid].stats;

while (!wil_vring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@@ -1209,12 +1412,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
if (skb) {
if (likely(d->dma.error == 0)) {
ndev->stats.tx_packets++;
stats->tx_packets++;
ndev->stats.tx_bytes += skb->len;
stats->tx_bytes += skb->len;
if (stats) {
stats->tx_packets++;
stats->tx_bytes += skb->len;
}
} else {
ndev->stats.tx_errors++;
stats->tx_errors++;
if (stats)
stats->tx_errors++;
}
wil_consume_skb(skb, d->dma.error == 0);
}
@@ -28,6 +28,7 @@ extern unsigned int mtu_max;
extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;

#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw" /* code */
@@ -49,6 +50,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
/* limit ring size in range [32..32k] */
#define WIL_RING_SIZE_ORDER_MIN (5)
#define WIL_RING_SIZE_ORDER_MAX (15)
@@ -542,6 +545,7 @@ struct wil6210_priv {
u32 monitor_flags;
u32 privacy; /* secure connection? */
int sinfo_gen;
u32 ap_isolate; /* no intra-BSS communication */
/* interrupt moderation */
u32 tx_max_burst_duration;
u32 tx_interframe_timeout;
@@ -593,6 +597,7 @@ struct wil6210_priv {
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
int bcast_vring;
/* scan */
struct cfg80211_scan_request *scan_request;

@@ -755,6 +760,9 @@ void wil_rx_fini(struct wil6210_priv *wil);
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
int cid, int tid);
void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
int wil_bcast_init(struct wil6210_priv *wil);
void wil_bcast_fini(struct wil6210_priv *wil);

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_priv *wil, int ringid);
@@ -466,7 +466,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)

/* FIXME FW can transmit only ucast frames to peer */
/* FIXME real ring_id instead of hard coded 0 */
memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].status = wil_sta_conn_pending;

wil->pending_connect_cid = evt->cid;
@@ -524,8 +524,8 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}

eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
ether_addr_copy(eth->h_dest, ndev->dev_addr);
ether_addr_copy(eth->h_source, evt->src_mac);
eth->h_proto = cpu_to_be16(ETH_P_PAE);
memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -851,7 +851,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
{
struct wmi_set_mac_address_cmd cmd;

memcpy(cmd.mac, addr, ETH_ALEN);
ether_addr_copy(cmd.mac, addr);

wil_dbg_wmi(wil, "Set MAC %pM\n", addr);

@@ -1109,6 +1109,11 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
*/
cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
}

if (rx_align_2)
cmd.l2_802_3_offload_ctrl |=
L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;

/* typical time for secure PCP is 840ms */
rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
@@ -1157,7 +1162,8 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
struct wmi_disconnect_sta_cmd cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
memcpy(cmd.dst_mac, mac, ETH_ALEN);

ether_addr_copy(cmd.dst_mac, mac);

wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
@@ -70,7 +70,6 @@ enum wmi_command_id {
WMI_SET_UCODE_IDLE_CMDID = 0x0813,
WMI_SET_WORK_MODE_CMDID = 0x0815,
WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
WMI_MARLON_R_READ_CMDID = 0x0818,
WMI_MARLON_R_WRITE_CMDID = 0x0819,
WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
@@ -80,6 +79,7 @@ enum wmi_command_id {
WMI_RF_RX_TEST_CMDID = 0x081e,
WMI_CFG_RX_CHAIN_CMDID = 0x0820,
WMI_VRING_CFG_CMDID = 0x0821,
WMI_BCAST_VRING_CFG_CMDID = 0x0822,
WMI_VRING_BA_EN_CMDID = 0x0823,
WMI_VRING_BA_DIS_CMDID = 0x0824,
WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
@@ -99,6 +99,7 @@ enum wmi_command_id {
WMI_BF_TXSS_MGMT_CMDID = 0x0837,
WMI_BF_SM_MGMT_CMDID = 0x0838,
WMI_BF_RXSS_MGMT_CMDID = 0x0839,
WMI_BF_TRIG_CMDID = 0x083A,
WMI_SET_SECTORS_CMDID = 0x0849,
WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
WMI_MAINTAIN_RESUME_CMDID = 0x0851,
@@ -595,6 +596,22 @@ struct wmi_vring_cfg_cmd {
struct wmi_vring_cfg vring_cfg;
} __packed;

/*
* WMI_BCAST_VRING_CFG_CMDID
*/
struct wmi_bcast_vring_cfg {
struct wmi_sw_ring_cfg tx_sw_ring;
u8 ringid; /* 0-23 vrings */
u8 encap_trans_type;
u8 ds_cfg; /* 802.3 DS cfg */
u8 nwifi_ds_trans_type;
} __packed;

struct wmi_bcast_vring_cfg_cmd {
__le32 action;
struct wmi_bcast_vring_cfg vring_cfg;
} __packed;

/*
* WMI_VRING_BA_EN_CMDID
*/
@@ -687,6 +704,9 @@ struct wmi_cfg_rx_chain_cmd {
#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
u8 l2_802_3_offload_ctrl;

#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
@@ -841,7 +861,6 @@ enum wmi_event_id {
WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
@ -4866,7 +4866,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
|
|||
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
bcma_core_pci_irq_ctl(dev->dev->bdev->bus,
|
||||
bcma_host_pci_irq_ctl(dev->dev->bdev->bus,
|
||||
dev->dev->bdev, true);
|
||||
bcma_host_pci_up(dev->dev->bdev->bus);
|
||||
break;
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <linux/mmc/host.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/platform_data/brcmfmac-sdio.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -1006,6 +1007,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
|
|||
sg_free_table(&sdiodev->sgtable);
|
||||
sdiodev->sbwad = 0;
|
||||
|
||||
pm_runtime_allow(sdiodev->func[1]->card->host->parent);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1074,7 +1076,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
|
|||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
pm_runtime_forbid(host->parent);
|
||||
out:
|
||||
if (ret)
|
||||
brcmf_sdiod_remove(sdiodev);
|
||||
|
@ -1096,6 +1098,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
|
|||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
|
||||
{ /* end: all zeroes */ }
|
||||
};
|
||||
|
@ -1194,7 +1198,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
|
|||
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
|
||||
brcmf_dbg(SDIO, "Function: %d\n", func->num);
|
||||
|
||||
if (func->num != 1 && func->num != 2)
|
||||
if (func->num != 1)
|
||||
return;
|
||||
|
||||
bus_if = dev_get_drvdata(&func->dev);
|
||||
|
|
|
@ -100,9 +100,6 @@
|
|||
#define BCM4329_CORE_SOCRAM_BASE 0x18003000
|
||||
/* ARM Cortex M3 core, ID 0x82a */
|
||||
#define BCM4329_CORE_ARM_BASE 0x18002000
|
||||
#define BCM4329_RAMSIZE 0x48000
|
||||
/* bcm43143 */
|
||||
#define BCM43143_RAMSIZE 0x70000
|
||||
|
||||
#define CORE_SB(base, field) \
|
||||
(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
|
||||
|
@ -150,6 +147,78 @@ struct sbconfig {
|
|||
u32 sbidhigh; /* identification */
|
||||
};
|
||||
|
||||
/* bankidx and bankinfo reg defines corerev >= 8 */
|
||||
#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
|
||||
#define SOCRAM_BANKINFO_SZMASK 0x0000007f
|
||||
#define SOCRAM_BANKIDX_ROM_MASK 0x00000100
|
||||
|
||||
#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
|
||||
/* socram bankinfo memtype */
|
||||
#define SOCRAM_MEMTYPE_RAM 0
|
||||
#define SOCRAM_MEMTYPE_R0M 1
|
||||
#define SOCRAM_MEMTYPE_DEVRAM 2
|
||||
|
||||
#define SOCRAM_BANKINFO_SZBASE 8192
|
||||
#define SRCI_LSS_MASK 0x00f00000
|
||||
#define SRCI_LSS_SHIFT 20
|
||||
#define SRCI_SRNB_MASK 0xf0
|
||||
#define SRCI_SRNB_SHIFT 4
|
||||
#define SRCI_SRBSZ_MASK 0xf
|
||||
#define SRCI_SRBSZ_SHIFT 0
|
||||
#define SR_BSZ_BASE 14
|
||||
|
||||
struct sbsocramregs {
|
||||
u32 coreinfo;
|
||||
u32 bwalloc;
|
||||
u32 extracoreinfo;
|
||||
u32 biststat;
|
||||
u32 bankidx;
|
||||
u32 standbyctrl;
|
||||
|
||||
u32 errlogstatus; /* rev 6 */
|
||||
u32 errlogaddr; /* rev 6 */
|
||||
/* used for patching rev 3 & 5 */
|
||||
u32 cambankidx;
|
||||
u32 cambankstandbyctrl;
|
||||
u32 cambankpatchctrl;
|
||||
u32 cambankpatchtblbaseaddr;
|
||||
u32 cambankcmdreg;
|
||||
u32 cambankdatareg;
|
||||
u32 cambankmaskreg;
|
||||
u32 PAD[1];
|
||||
u32 bankinfo; /* corev 8 */
|
||||
u32 bankpda;
|
||||
u32 PAD[14];
|
||||
u32 extmemconfig;
|
||||
u32 extmemparitycsr;
|
||||
u32 extmemparityerrdata;
|
||||
u32 extmemparityerrcnt;
|
||||
u32 extmemwrctrlandsize;
|
||||
u32 PAD[84];
|
||||
u32 workaround;
|
||||
u32 pwrctl; /* corerev >= 2 */
|
||||
u32 PAD[133];
|
||||
u32 sr_control; /* corerev >= 15 */
|
||||
u32 sr_status; /* corerev >= 15 */
|
||||
u32 sr_address; /* corerev >= 15 */
|
||||
u32 sr_data; /* corerev >= 15 */
|
||||
};
|
||||
|
||||
#define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f)
|
||||
|
||||
#define ARMCR4_CAP (0x04)
|
||||
#define ARMCR4_BANKIDX (0x40)
|
||||
#define ARMCR4_BANKINFO (0x44)
|
||||
#define ARMCR4_BANKPDA (0x4C)
|
||||
|
||||
#define ARMCR4_TCBBNB_MASK 0xf0
|
||||
#define ARMCR4_TCBBNB_SHIFT 4
|
||||
#define ARMCR4_TCBANB_MASK 0xf
|
||||
#define ARMCR4_TCBANB_SHIFT 0
|
||||
|
||||
#define ARMCR4_BSZ_MASK 0x3f
|
||||
#define ARMCR4_BSZ_MULT 8192
|
||||
|
||||
struct brcmf_core_priv {
|
||||
struct brcmf_core pub;
|
||||
u32 wrapbase;
|
||||
|
@ -419,13 +488,13 @@ static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
|
|||
return &core->pub;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
/* safety check for chipinfo */
|
||||
static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
|
||||
{
|
||||
struct brcmf_core_priv *core;
|
||||
bool need_socram = false;
|
||||
bool has_socram = false;
|
||||
bool cpu_found = false;
|
||||
int idx = 1;
|
||||
|
||||
list_for_each_entry(core, &ci->cores, list) {
|
||||
|
@ -435,22 +504,24 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
|
|||
|
||||
switch (core->pub.id) {
|
||||
case BCMA_CORE_ARM_CM3:
|
||||
cpu_found = true;
|
||||
need_socram = true;
|
||||
break;
|
||||
case BCMA_CORE_INTERNAL_MEM:
|
||||
has_socram = true;
|
||||
break;
|
||||
case BCMA_CORE_ARM_CR4:
|
||||
if (ci->pub.rambase == 0) {
|
||||
brcmf_err("RAM base not provided with ARM CR4 core\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
cpu_found = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!cpu_found) {
|
||||
brcmf_err("CPU core not detected\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
/* check RAM core presence for ARM CM3 core */
|
||||
if (need_socram && !has_socram) {
|
||||
brcmf_err("RAM core not provided with ARM CM3 core\n");
|
||||
|
@ -458,56 +529,164 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
#else /* DEBUG */
|
||||
static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
|
||||
static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
|
||||
{
|
||||
return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
|
||||
}
|
||||
|
||||
static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
|
||||
u16 reg, u32 val)
|
||||
{
|
||||
core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
|
||||
}
|
||||
|
||||
static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
|
||||
u32 *banksize)
|
||||
{
|
||||
u32 bankinfo;
|
||||
u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
|
||||
|
||||
bankidx |= idx;
|
||||
brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
|
||||
bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
|
||||
*banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
|
||||
*banksize *= SOCRAM_BANKINFO_SZBASE;
|
||||
return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
|
||||
}
|
||||
|
||||
static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
|
||||
u32 *srsize)
|
||||
{
|
||||
u32 coreinfo;
|
||||
uint nb, banksize, lss;
|
||||
bool retent;
|
||||
int i;
|
||||
|
||||
*ramsize = 0;
|
||||
*srsize = 0;
|
||||
|
||||
if (WARN_ON(sr->pub.rev < 4))
|
||||
return;
|
||||
|
||||
if (!brcmf_chip_iscoreup(&sr->pub))
|
||||
brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
|
||||
|
||||
/* Get info for determining size */
|
||||
coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
|
||||
nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
|
||||
|
||||
if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
|
||||
banksize = (coreinfo & SRCI_SRBSZ_MASK);
|
||||
lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
|
||||
if (lss != 0)
|
||||
nb--;
|
||||
*ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
|
||||
if (lss != 0)
|
||||
*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
|
||||
} else {
|
||||
nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
|
||||
for (i = 0; i < nb; i++) {
|
||||
retent = brcmf_chip_socram_banksize(sr, i, &banksize);
|
||||
*ramsize += banksize;
|
||||
if (retent)
|
||||
*srsize += banksize;
|
||||
}
|
||||
}
|
||||
|
||||
/* hardcoded save&restore memory sizes */
|
||||
switch (sr->chip->pub.chip) {
|
||||
case BRCM_CC_4334_CHIP_ID:
|
||||
if (sr->chip->pub.chiprev < 2)
|
||||
*srsize = (32 * 1024);
|
||||
break;
|
||||
case BRCM_CC_43430_CHIP_ID:
|
||||
/* assume sr for now as we can not check
|
||||
* firmware sr capability at this point.
|
||||
*/
|
||||
*srsize = (64 * 1024);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/** Return the TCM-RAM size of the ARMCR4 core. */
|
||||
static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
|
||||
{
|
||||
u32 corecap;
|
||||
u32 memsize = 0;
|
||||
u32 nab;
|
||||
u32 nbb;
|
||||
u32 totb;
|
||||
u32 bxinfo;
|
||||
u32 idx;
|
||||
|
||||
corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
|
||||
|
||||
nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
|
||||
nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
|
||||
totb = nab + nbb;
|
||||
|
||||
for (idx = 0; idx < totb; idx++) {
|
||||
brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
|
||||
bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
|
||||
memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
|
||||
}
|
||||
|
||||
return memsize;
|
||||
}
|
||||
|
||||
static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
|
||||
{
|
||||
switch (ci->pub.chip) {
|
||||
case BRCM_CC_4329_CHIP_ID:
|
||||
ci->pub.ramsize = BCM4329_RAMSIZE;
|
||||
break;
|
||||
case BRCM_CC_43143_CHIP_ID:
|
||||
ci->pub.ramsize = BCM43143_RAMSIZE;
|
||||
break;
|
||||
case BRCM_CC_43241_CHIP_ID:
|
||||
ci->pub.ramsize = 0x90000;
|
||||
break;
|
||||
case BRCM_CC_4330_CHIP_ID:
|
||||
ci->pub.ramsize = 0x48000;
|
||||
break;
|
||||
case BRCM_CC_4334_CHIP_ID:
|
||||
case BRCM_CC_43340_CHIP_ID:
|
||||
ci->pub.ramsize = 0x80000;
|
||||
break;
|
||||
case BRCM_CC_4345_CHIP_ID:
|
||||
return 0x198000;
|
||||
case BRCM_CC_4335_CHIP_ID:
|
||||
ci->pub.ramsize = 0xc0000;
|
||||
ci->pub.rambase = 0x180000;
|
||||
break;
|
||||
case BRCM_CC_43362_CHIP_ID:
|
||||
ci->pub.ramsize = 0x3c000;
|
||||
break;
|
||||
case BRCM_CC_4339_CHIP_ID:
|
||||
case BRCM_CC_4354_CHIP_ID:
|
||||
case BRCM_CC_4356_CHIP_ID:
|
||||
case BRCM_CC_43567_CHIP_ID:
|
||||
case BRCM_CC_43569_CHIP_ID:
|
||||
case BRCM_CC_43570_CHIP_ID:
|
||||
ci->pub.ramsize = 0xc0000;
|
||||
ci->pub.rambase = 0x180000;
|
||||
break;
|
||||
case BRCM_CC_43602_CHIP_ID:
|
||||
ci->pub.ramsize = 0xf0000;
|
||||
ci->pub.rambase = 0x180000;
|
||||
break;
|
||||
return 0x180000;
|
||||
default:
|
||||
brcmf_err("unknown chip: %s\n", ci->pub.name);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
|
||||
{
|
||||
struct brcmf_core_priv *mem_core;
|
||||
struct brcmf_core *mem;
|
||||
|
||||
mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
|
||||
if (mem) {
|
||||
mem_core = container_of(mem, struct brcmf_core_priv, pub);
|
||||
ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
|
||||
ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
|
||||
if (!ci->pub.rambase) {
|
||||
brcmf_err("RAM base not provided with ARM CR4 core\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM);
|
||||
mem_core = container_of(mem, struct brcmf_core_priv, pub);
|
||||
brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
|
||||
&ci->pub.srsize);
|
||||
}
|
||||
brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
|
||||
ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
|
||||
ci->pub.srsize, ci->pub.srsize);
|
||||
|
||||
if (!ci->pub.ramsize) {
|
||||
brcmf_err("RAM size is undetermined\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
|
||||
|
@ -660,6 +839,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
|
|||
struct brcmf_core *core;
|
||||
u32 regdata;
|
||||
u32 socitype;
|
||||
int ret;
|
||||
|
||||
/* Get CC core rev
|
||||
* Chipid is assume to be at offset 0 from SI_ENUM_BASE
|
||||
|
@ -712,9 +892,13 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
brcmf_chip_get_raminfo(ci);
|
||||
ret = brcmf_chip_cores_check(ci);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return brcmf_chip_cores_check(ci);
|
||||
/* assure chip is passive for core access */
|
||||
brcmf_chip_set_passive(&ci->pub);
|
||||
return brcmf_chip_get_raminfo(ci);
|
||||
}
|
||||
|
||||
static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
|
||||
|
@ -778,12 +962,6 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
|
|||
if (chip->ops->setup)
|
||||
ret = chip->ops->setup(chip->ctx, pub);
|
||||
|
||||
/*
|
||||
* Make sure any on-chip ARM is off (in case strapping is wrong),
|
||||
* or downloaded code was already running.
|
||||
*/
|
||||
brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
|
||||
brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -799,7 +977,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
|
|||
err = -EINVAL;
|
||||
if (WARN_ON(!ops->prepare))
|
||||
err = -EINVAL;
|
||||
if (WARN_ON(!ops->exit_dl))
|
||||
if (WARN_ON(!ops->activate))
|
||||
err = -EINVAL;
|
||||
if (err < 0)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
@ -897,9 +1075,10 @@ void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
|
|||
}
|
||||
|
||||
static void
|
||||
brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
|
||||
brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
|
||||
{
|
||||
struct brcmf_core *core;
|
||||
struct brcmf_core_priv *sr;
|
||||
|
||||
brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
|
||||
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
|
||||
|
@ -909,9 +1088,16 @@ brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
|
|||
D11_BCMA_IOCTL_PHYCLOCKEN);
|
||||
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
|
||||
brcmf_chip_resetcore(core, 0, 0, 0);
|
||||
|
||||
/* disable bank #3 remap for this device */
|
||||
if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
|
||||
sr = container_of(core, struct brcmf_core_priv, pub);
|
||||
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
|
||||
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
|
||||
}
|
||||
}
|
||||
|
||||
static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
|
||||
static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
|
||||
{
|
||||
struct brcmf_core *core;
|
||||
|
||||
|
@ -921,7 +1107,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
|
|||
return false;
|
||||
}
|
||||
|
||||
chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
|
||||
chip->ops->activate(chip->ctx, &chip->pub, 0);
|
||||
|
||||
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
|
||||
brcmf_chip_resetcore(core, 0, 0, 0);
|
||||
|
@ -930,7 +1116,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
|
|||
}
|
||||
|
||||
static inline void
|
||||
brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
|
||||
brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
|
||||
{
|
||||
struct brcmf_core *core;
|
||||
|
||||
|
@ -943,11 +1129,11 @@ brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
|
|||
D11_BCMA_IOCTL_PHYCLOCKEN);
|
||||
}
|
||||
|
||||
static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
|
||||
static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
|
||||
{
|
||||
struct brcmf_core *core;
|
||||
|
||||
chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
|
||||
chip->ops->activate(chip->ctx, &chip->pub, rstvec);
|
||||
|
||||
/* restore ARM */
|
||||
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
|
||||
|
@ -956,7 +1142,7 @@ static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
|
|||
return true;
|
||||
}
|
||||
|
||||
void brcmf_chip_enter_download(struct brcmf_chip *pub)
|
||||
void brcmf_chip_set_passive(struct brcmf_chip *pub)
|
||||
{
|
||||
struct brcmf_chip_priv *chip;
|
||||
struct brcmf_core *arm;
|
||||
|
@ -966,14 +1152,14 @@ void brcmf_chip_enter_download(struct brcmf_chip *pub)
|
|||
chip = container_of(pub, struct brcmf_chip_priv, pub);
|
||||
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
|
||||
if (arm) {
|
||||
brcmf_chip_cr4_enterdl(chip);
|
||||
brcmf_chip_cr4_set_passive(chip);
|
||||
return;
|
||||
}
|
||||
|
||||
brcmf_chip_cm3_enterdl(chip);
|
||||
brcmf_chip_cm3_set_passive(chip);
|
||||
}
|
||||
|
||||
bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
|
||||
bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
|
||||
{
|
||||
struct brcmf_chip_priv *chip;
|
||||
struct brcmf_core *arm;
|
||||
|
@ -983,9 +1169,9 @@ bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
|
|||
chip = container_of(pub, struct brcmf_chip_priv, pub);
|
||||
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
|
||||
if (arm)
|
||||
return brcmf_chip_cr4_exitdl(chip, rstvec);
|
||||
return brcmf_chip_cr4_set_active(chip, rstvec);
|
||||
|
||||
return brcmf_chip_cm3_exitdl(chip);
|
||||
return brcmf_chip_cm3_set_active(chip);
|
||||
}
|
||||
|
||||
bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
|
||||
|
@ -1016,6 +1202,10 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
|
|||
addr = CORE_CC_REG(base, chipcontrol_data);
|
||||
reg = chip->ops->read32(chip->ctx, addr);
|
||||
return (reg & pmu_cc3_mask) != 0;
|
||||
case BRCM_CC_43430_CHIP_ID:
|
||||
addr = CORE_CC_REG(base, sr_control1);
|
||||
reg = chip->ops->read32(chip->ctx, addr);
|
||||
return reg != 0;
|
||||
default:
|
||||
addr = CORE_CC_REG(base, pmucapabilities_ext);
|
||||
reg = chip->ops->read32(chip->ctx, addr);
|
||||
|
|
|
@ -30,7 +30,8 @@
|
|||
* @pmucaps: PMU capabilities.
|
||||
* @pmurev: PMU revision.
|
||||
* @rambase: RAM base address (only applicable for ARM CR4 chips).
|
||||
* @ramsize: amount of RAM on chip.
|
||||
* @ramsize: amount of RAM on chip including retention.
|
||||
* @srsize: amount of retention RAM on chip.
|
||||
* @name: string representation of the chip identifier.
|
||||
*/
|
||||
struct brcmf_chip {
|
||||
|
@ -41,6 +42,7 @@ struct brcmf_chip {
|
|||
u32 pmurev;
|
||||
u32 rambase;
|
||||
u32 ramsize;
|
||||
u32 srsize;
|
||||
char name[8];
|
||||
};
|
||||
|
||||
|
@ -64,7 +66,7 @@ struct brcmf_core {
|
|||
* @write32: write 32-bit value over bus.
|
||||
* @prepare: prepare bus for core configuration.
|
||||
* @setup: bus-specific core setup.
|
||||
* @exit_dl: exit download state.
|
||||
* @active: chip becomes active.
|
||||
* The callback should use the provided @rstvec when non-zero.
|
||||
*/
|
||||
struct brcmf_buscore_ops {
|
||||
|
@ -72,7 +74,7 @@ struct brcmf_buscore_ops {
|
|||
void (*write32)(void *ctx, u32 addr, u32 value);
|
||||
int (*prepare)(void *ctx);
|
||||
int (*setup)(void *ctx, struct brcmf_chip *chip);
|
||||
void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
|
||||
void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
|
||||
};
|
||||
|
||||
struct brcmf_chip *brcmf_chip_attach(void *ctx,
|
||||
|
@ -84,8 +86,8 @@ bool brcmf_chip_iscoreup(struct brcmf_core *core);
|
|||
void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
|
||||
void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
|
||||
u32 postreset);
|
||||
void brcmf_chip_enter_download(struct brcmf_chip *ci);
|
||||
bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
|
||||
void brcmf_chip_set_passive(struct brcmf_chip *ci);
|
||||
bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
|
||||
bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
|
||||
|
||||
#endif /* BRCMF_AXIDMP_H */
|
||||
|
|
|
@ -481,10 +481,9 @@ static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
|
|||
|
||||
static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
|
||||
{
|
||||
if (waitqueue_active(&msgbuf->ioctl_resp_wait)) {
|
||||
msgbuf->ctl_completed = true;
|
||||
msgbuf->ctl_completed = true;
|
||||
if (waitqueue_active(&msgbuf->ioctl_resp_wait))
|
||||
wake_up(&msgbuf->ioctl_resp_wait);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -17,11 +17,11 @@
|
|||
|
||||
#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
|
||||
|
||||
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20
|
||||
#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256
|
||||
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 20
|
||||
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64
|
||||
#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512
|
||||
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64
|
||||
#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
|
||||
#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 256
|
||||
#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512
|
||||
#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
|
||||
|
||||
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
|
||||
|
|
|
@ -47,8 +47,6 @@ enum brcmf_pcie_state {
|
|||
|
||||
#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
|
||||
#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
|
||||
#define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin"
|
||||
#define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt"
|
||||
#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
|
||||
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
|
||||
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
|
||||
|
@ -187,8 +185,8 @@ enum brcmf_pcie_state {
|
|||
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
|
||||
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
|
||||
|
||||
|
@ -509,8 +507,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
|
|||
|
||||
static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
|
||||
{
|
||||
brcmf_chip_enter_download(devinfo->ci);
|
||||
|
||||
if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
|
||||
brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
|
||||
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
|
||||
|
@ -536,7 +532,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
|
|||
brcmf_chip_resetcore(core, 0, 0, 0);
|
||||
}
|
||||
|
||||
return !brcmf_chip_exit_download(devinfo->ci, resetintr);
|
||||
return !brcmf_chip_set_active(devinfo->ci, resetintr);
|
||||
}
|
||||
|
||||
|
||||
|
@ -653,10 +649,9 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
|
|||
console->log_str[console->log_idx] = ch;
|
||||
console->log_idx++;
|
||||
}
|
||||
|
||||
if (ch == '\n') {
|
||||
console->log_str[console->log_idx] = 0;
|
||||
brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str);
|
||||
brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
|
||||
console->log_idx = 0;
|
||||
}
|
||||
}
|
||||
|
@ -1328,10 +1323,6 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
|
|||
fw_name = BRCMF_PCIE_43602_FW_NAME;
|
||||
nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
|
||||
break;
|
||||
case BRCM_CC_4354_CHIP_ID:
|
||||
fw_name = BRCMF_PCIE_4354_FW_NAME;
|
||||
nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
|
||||
break;
|
||||
case BRCM_CC_4356_CHIP_ID:
|
||||
fw_name = BRCMF_PCIE_4356_FW_NAME;
|
||||
nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
|
||||
|
@ -1566,8 +1557,8 @@ static int brcmf_pcie_buscoreprep(void *ctx)
|
|||
}
|
||||
|
||||
|
||||
static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
|
||||
u32 rstvec)
|
||||
static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
|
||||
u32 rstvec)
|
||||
{
|
||||
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
|
||||
|
||||
|
@ -1577,7 +1568,7 @@ static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
|
|||
|
||||
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
|
||||
.prepare = brcmf_pcie_buscoreprep,
|
||||
.exit_dl = brcmf_pcie_buscore_exitdl,
|
||||
.activate = brcmf_pcie_buscore_activate,
|
||||
.read32 = brcmf_pcie_buscore_read32,
|
||||
.write32 = brcmf_pcie_buscore_write32,
|
||||
};
|
||||
|
@ -1856,7 +1847,6 @@ cleanup:
|
|||
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
|
||||
|
||||
static struct pci_device_id brcmf_pcie_devid_table[] = {
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
|
||||
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
|
||||
|
|
|
@ -432,8 +432,6 @@ struct brcmf_sdio {
|
|||
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
|
||||
struct brcmf_chip *ci; /* Chip info struct */
|
||||
|
||||
u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
|
||||
|
||||
u32 hostintmask; /* Copy of Host Interrupt Mask */
|
||||
atomic_t intstatus; /* Intstatus bits (events) pending */
|
||||
atomic_t fcstate; /* State of dongle flow-control */
|
||||
|
@ -485,10 +483,9 @@ struct brcmf_sdio {
|
|||
#endif /* DEBUG */
|
||||
|
||||
uint clkstate; /* State of sd and backplane clock(s) */
|
||||
bool activity; /* Activity flag for clock down */
|
||||
s32 idletime; /* Control for activity timeout */
|
||||
s32 idlecount; /* Activity timeout counter */
|
||||
s32 idleclock; /* How to set bus driver when idle */
|
||||
s32 idlecount; /* Activity timeout counter */
|
||||
s32 idleclock; /* How to set bus driver when idle */
|
||||
bool rxflow_mode; /* Rx flow control mode */
|
||||
bool rxflow; /* Is rx flow control on */
|
||||
bool alp_only; /* Don't use HT clock (ALP only) */
|
||||
|
@ -510,7 +507,8 @@ struct brcmf_sdio {
|
|||
|
||||
struct workqueue_struct *brcmf_wq;
|
||||
struct work_struct datawork;
|
||||
atomic_t dpc_tskcnt;
|
||||
bool dpc_triggered;
|
||||
bool dpc_running;
|
||||
|
||||
bool txoff; /* Transmit flow-controlled */
|
||||
struct brcmf_sdio_count sdcnt;
|
||||
|
@ -617,6 +615,10 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
|
|||
#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
|
||||
#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
|
||||
#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
|
||||
#define BCM43430_FIRMWARE_NAME "brcm/brcmfmac43430-sdio.bin"
|
||||
#define BCM43430_NVRAM_NAME "brcm/brcmfmac43430-sdio.txt"
|
||||
#define BCM43455_FIRMWARE_NAME "brcm/brcmfmac43455-sdio.bin"
|
||||
#define BCM43455_NVRAM_NAME "brcm/brcmfmac43455-sdio.txt"
|
||||
#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
|
||||
#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
|
||||
|
||||
|
@ -640,6 +642,10 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
|
|||
MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
|
||||
MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
|
||||
MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
|
||||
MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
|
||||
MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
|
||||
MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
|
||||
|
||||
|
@ -669,6 +675,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
|
|||
{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
|
||||
{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
|
||||
{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
|
||||
{ BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
|
||||
{ BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
|
||||
{ BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
|
||||
};
|
||||
|
||||
|
@ -959,13 +967,8 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
|
|||
brcmf_dbg(SDIO, "Enter\n");
|
||||
|
||||
/* Early exit if we're already there */
|
||||
if (bus->clkstate == target) {
|
||||
if (target == CLK_AVAIL) {
|
||||
brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
|
||||
bus->activity = true;
|
||||
}
|
||||
if (bus->clkstate == target)
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (target) {
|
||||
case CLK_AVAIL:
|
||||
|
@ -974,8 +977,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
|
|||
brcmf_sdio_sdclk(bus, true);
|
||||
/* Now request HT Avail on the backplane */
|
||||
brcmf_sdio_htclk(bus, true, pendok);
|
||||
brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
|
||||
bus->activity = true;
|
||||
break;
|
||||
|
||||
case CLK_SDONLY:
|
||||
|
@ -987,7 +988,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
|
|||
else
|
||||
brcmf_err("request for %d -> %d\n",
|
||||
bus->clkstate, target);
|
||||
brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
|
||||
break;
|
||||
|
||||
case CLK_NONE:
|
||||
|
@ -996,7 +996,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
|
|||
brcmf_sdio_htclk(bus, false, false);
|
||||
/* Now remove the SD clock */
|
||||
brcmf_sdio_sdclk(bus, false);
|
||||
brcmf_sdio_wd_timer(bus, 0);
|
||||
break;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
|
@ -1024,17 +1023,6 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
|
|||
|
||||
/* Going to sleep */
|
||||
if (sleep) {
|
||||
/* Don't sleep if something is pending */
|
||||
if (atomic_read(&bus->intstatus) ||
|
||||
atomic_read(&bus->ipend) > 0 ||
|
||||
bus->ctrl_frame_stat ||
|
||||
(!atomic_read(&bus->fcstate) &&
|
||||
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
|
||||
data_ok(bus))) {
|
||||
err = -EBUSY;
|
||||
goto done;
|
||||
}
|
||||
|
||||
clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
|
||||
SBSDIO_FUNC1_CHIPCLKCSR,
|
||||
&err);
|
||||
|
@ -1045,11 +1033,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
|
|||
SBSDIO_ALP_AVAIL_REQ, &err);
|
||||
}
|
||||
err = brcmf_sdio_kso_control(bus, false);
|
||||
/* disable watchdog */
|
||||
if (!err)
|
||||
brcmf_sdio_wd_timer(bus, 0);
|
||||
} else {
|
||||
bus->idlecount = 0;
|
||||
err = brcmf_sdio_kso_control(bus, true);
|
||||
}
|
||||
if (err) {
|
||||
|
@ -1066,6 +1050,7 @@ end:
|
|||
brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
|
||||
} else {
|
||||
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
|
||||
brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
|
||||
}
|
||||
bus->sleeping = sleep;
|
||||
brcmf_dbg(SDIO, "new state %s\n",
|
||||
|
@ -1085,44 +1070,47 @@ static inline bool brcmf_sdio_valid_shared_address(u32 addr)
|
|||
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
|
||||
struct sdpcm_shared *sh)
|
||||
{
|
||||
u32 addr;
|
||||
u32 addr = 0;
|
||||
int rv;
|
||||
u32 shaddr = 0;
|
||||
struct sdpcm_shared_le sh_le;
|
||||
__le32 addr_le;
|
||||
|
||||
shaddr = bus->ci->rambase + bus->ramsize - 4;
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
brcmf_sdio_bus_sleep(bus, false, false);
|
||||
|
||||
/*
|
||||
* Read last word in socram to determine
|
||||
* address of sdpcm_shared structure
|
||||
*/
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
brcmf_sdio_bus_sleep(bus, false, false);
|
||||
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
|
||||
if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
|
||||
shaddr -= bus->ci->srsize;
|
||||
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
|
||||
(u8 *)&addr_le, 4);
|
||||
if (rv < 0)
|
||||
return rv;
|
||||
|
||||
addr = le32_to_cpu(addr_le);
|
||||
|
||||
brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
|
||||
goto fail;
|
||||
|
||||
/*
|
||||
* Check if addr is valid.
|
||||
* NVRAM length at the end of memory should have been overwritten.
|
||||
*/
|
||||
addr = le32_to_cpu(addr_le);
|
||||
if (!brcmf_sdio_valid_shared_address(addr)) {
|
||||
brcmf_err("invalid sdpcm_shared address 0x%08X\n",
|
||||
addr);
|
||||
return -EINVAL;
|
||||
brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
|
||||
rv = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
|
||||
|
||||
/* Read hndrte_shared structure */
|
||||
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
|
||||
sizeof(struct sdpcm_shared_le));
|
||||
if (rv < 0)
|
||||
return rv;
|
||||
goto fail;
|
||||
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
|
||||
/* Endianness */
|
||||
sh->flags = le32_to_cpu(sh_le.flags);
|
||||
|
@ -1139,8 +1127,13 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
|
|||
sh->flags & SDPCM_SHARED_VERSION_MASK);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
|
||||
rv, addr);
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
return rv;
|
||||
}
|
||||
|
||||
static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
|
||||
|
@ -2721,11 +2714,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
|
|||
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
|
||||
data_ok(bus)) {
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
|
||||
bus->ctrl_frame_len);
|
||||
if (bus->ctrl_frame_stat) {
|
||||
err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
|
||||
bus->ctrl_frame_len);
|
||||
bus->ctrl_frame_err = err;
|
||||
wmb();
|
||||
bus->ctrl_frame_stat = false;
|
||||
}
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
bus->ctrl_frame_err = err;
|
||||
bus->ctrl_frame_stat = false;
|
||||
brcmf_sdio_wait_event_wakeup(bus);
|
||||
}
|
||||
/* Send queued frames (limit 1 if rx may still be pending) */
|
||||
|
@ -2740,12 +2736,22 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
|
|||
if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
|
||||
brcmf_err("failed backplane access over SDIO, halting operation\n");
|
||||
atomic_set(&bus->intstatus, 0);
|
||||
if (bus->ctrl_frame_stat) {
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
if (bus->ctrl_frame_stat) {
|
||||
bus->ctrl_frame_err = -ENODEV;
|
||||
wmb();
|
||||
bus->ctrl_frame_stat = false;
|
||||
brcmf_sdio_wait_event_wakeup(bus);
|
||||
}
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
}
|
||||
} else if (atomic_read(&bus->intstatus) ||
|
||||
atomic_read(&bus->ipend) > 0 ||
|
||||
(!atomic_read(&bus->fcstate) &&
|
||||
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
|
||||
data_ok(bus))) {
|
||||
atomic_inc(&bus->dpc_tskcnt);
|
||||
bus->dpc_triggered = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2941,20 +2947,27 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
|
|||
/* Send from dpc */
|
||||
bus->ctrl_frame_buf = msg;
|
||||
bus->ctrl_frame_len = msglen;
|
||||
wmb();
|
||||
bus->ctrl_frame_stat = true;
|
||||
|
||||
brcmf_sdio_trigger_dpc(bus);
|
||||
wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
|
||||
msecs_to_jiffies(CTL_DONE_TIMEOUT));
|
||||
|
||||
if (!bus->ctrl_frame_stat) {
|
||||
ret = 0;
|
||||
if (bus->ctrl_frame_stat) {
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
if (bus->ctrl_frame_stat) {
|
||||
brcmf_dbg(SDIO, "ctrl_frame timeout\n");
|
||||
bus->ctrl_frame_stat = false;
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
}
|
||||
if (!ret) {
|
||||
brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
|
||||
bus->ctrl_frame_err);
|
||||
rmb();
|
||||
ret = bus->ctrl_frame_err;
|
||||
} else {
|
||||
brcmf_dbg(SDIO, "ctrl_frame timeout\n");
|
||||
bus->ctrl_frame_stat = false;
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
|
@ -3358,9 +3371,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
|
|||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
|
||||
|
||||
/* Keep arm in reset */
|
||||
brcmf_chip_enter_download(bus->ci);
|
||||
|
||||
rstvec = get_unaligned_le32(fw->data);
|
||||
brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
|
||||
|
||||
|
@ -3380,7 +3390,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
|
|||
}
|
||||
|
||||
/* Take arm out of reset */
|
||||
if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
|
||||
if (!brcmf_chip_set_active(bus->ci, rstvec)) {
|
||||
brcmf_err("error getting out of ARM core reset\n");
|
||||
goto err;
|
||||
}
|
||||
|
@ -3525,8 +3535,8 @@ done:
|
|||
|
||||
void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
|
||||
{
|
||||
if (atomic_read(&bus->dpc_tskcnt) == 0) {
|
||||
atomic_inc(&bus->dpc_tskcnt);
|
||||
if (!bus->dpc_triggered) {
|
||||
bus->dpc_triggered = true;
|
||||
queue_work(bus->brcmf_wq, &bus->datawork);
|
||||
}
|
||||
}
|
||||
|
@ -3557,11 +3567,11 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
|
|||
if (!bus->intr)
|
||||
brcmf_err("isr w/o interrupt configured!\n");
|
||||
|
||||
atomic_inc(&bus->dpc_tskcnt);
|
||||
bus->dpc_triggered = true;
|
||||
queue_work(bus->brcmf_wq, &bus->datawork);
|
||||
}
|
||||
|
||||
static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
|
||||
static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
|
||||
{
|
||||
brcmf_dbg(TIMER, "Enter\n");
|
||||
|
||||
|
@ -3577,7 +3587,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
|
|||
if (!bus->intr ||
|
||||
(bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
|
||||
|
||||
if (atomic_read(&bus->dpc_tskcnt) == 0) {
|
||||
if (!bus->dpc_triggered) {
|
||||
u8 devpend;
|
||||
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
|
@ -3595,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
|
|||
bus->sdcnt.pollcnt++;
|
||||
atomic_set(&bus->ipend, 1);
|
||||
|
||||
atomic_inc(&bus->dpc_tskcnt);
|
||||
bus->dpc_triggered = true;
|
||||
queue_work(bus->brcmf_wq, &bus->datawork);
|
||||
}
|
||||
}
|
||||
|
@ -3622,22 +3632,25 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
|
|||
#endif /* DEBUG */
|
||||
|
||||
/* On idle timeout clear activity flag and/or turn off clock */
|
||||
if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
|
||||
if (++bus->idlecount >= bus->idletime) {
|
||||
bus->idlecount = 0;
|
||||
if (bus->activity) {
|
||||
bus->activity = false;
|
||||
brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
|
||||
} else {
|
||||
if (!bus->dpc_triggered) {
|
||||
rmb();
|
||||
if ((!bus->dpc_running) && (bus->idletime > 0) &&
|
||||
(bus->clkstate == CLK_AVAIL)) {
|
||||
bus->idlecount++;
|
||||
if (bus->idlecount > bus->idletime) {
|
||||
brcmf_dbg(SDIO, "idle\n");
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
brcmf_sdio_wd_timer(bus, 0);
|
||||
bus->idlecount = 0;
|
||||
brcmf_sdio_bus_sleep(bus, true, false);
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
}
|
||||
} else {
|
||||
bus->idlecount = 0;
|
||||
}
|
||||
} else {
|
||||
bus->idlecount = 0;
|
||||
}
|
||||
|
||||
return (atomic_read(&bus->ipend) > 0);
|
||||
}
|
||||
|
||||
static void brcmf_sdio_dataworker(struct work_struct *work)
|
||||
|
@ -3645,10 +3658,14 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
|
|||
struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
|
||||
datawork);
|
||||
|
||||
while (atomic_read(&bus->dpc_tskcnt)) {
|
||||
atomic_set(&bus->dpc_tskcnt, 0);
|
||||
bus->dpc_running = true;
|
||||
wmb();
|
||||
while (ACCESS_ONCE(bus->dpc_triggered)) {
|
||||
bus->dpc_triggered = false;
|
||||
brcmf_sdio_dpc(bus);
|
||||
bus->idlecount = 0;
|
||||
}
|
||||
bus->dpc_running = false;
|
||||
if (brcmf_sdiod_freezing(bus->sdiodev)) {
|
||||
brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
|
||||
brcmf_sdiod_try_freeze(bus->sdiodev);
|
||||
|
@ -3771,8 +3788,8 @@ static int brcmf_sdio_buscoreprep(void *ctx)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
|
||||
u32 rstvec)
|
||||
static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
|
||||
u32 rstvec)
|
||||
{
|
||||
struct brcmf_sdio_dev *sdiodev = ctx;
|
||||
struct brcmf_core *core;
|
||||
|
@ -3815,7 +3832,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
|
|||
|
||||
static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
|
||||
.prepare = brcmf_sdio_buscoreprep,
|
||||
.exit_dl = brcmf_sdio_buscore_exitdl,
|
||||
.activate = brcmf_sdio_buscore_activate,
|
||||
.read32 = brcmf_sdio_buscore_read32,
|
||||
.write32 = brcmf_sdio_buscore_write32,
|
||||
};
|
||||
|
@ -3869,13 +3886,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
|
|||
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
|
||||
brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
|
||||
|
||||
/* Get info on the SOCRAM cores... */
|
||||
bus->ramsize = bus->ci->ramsize;
|
||||
if (!(bus->ramsize)) {
|
||||
brcmf_err("failed to find SOCRAM memory!\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Set card control so an SDIO card reset does a WLAN backplane reset */
|
||||
reg_val = brcmf_sdiod_regrb(bus->sdiodev,
|
||||
SDIO_CCCR_BRCM_CARDCTRL, &err);
|
||||
|
@ -4148,7 +4158,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
|
|||
bus->watchdog_tsk = NULL;
|
||||
}
|
||||
/* Initialize DPC thread */
|
||||
atomic_set(&bus->dpc_tskcnt, 0);
|
||||
bus->dpc_triggered = false;
|
||||
bus->dpc_running = false;
|
||||
|
||||
/* Assign bus interface call back */
|
||||
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
|
||||
|
@ -4243,14 +4254,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
|
|||
if (bus->ci) {
|
||||
if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
|
||||
sdio_claim_host(bus->sdiodev->func[1]);
|
||||
brcmf_sdio_wd_timer(bus, 0);
|
||||
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
|
||||
/* Leave the device in state where it is
|
||||
* 'quiet'. This is done by putting it in
|
||||
* download_state which essentially resets
|
||||
* all necessary cores.
|
||||
* 'passive'. This is done by resetting all
|
||||
* necessary cores.
|
||||
*/
|
||||
msleep(20);
|
||||
brcmf_chip_enter_download(bus->ci);
|
||||
brcmf_chip_set_passive(bus->ci);
|
||||
brcmf_sdio_clkctl(bus, CLK_NONE, false);
|
||||
sdio_release_host(bus->sdiodev->func[1]);
|
||||
}
|
||||
|
|
|
@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
|
|||
* Configure pci/pcmcia here instead of in brcms_c_attach()
|
||||
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
|
||||
*/
|
||||
bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
|
||||
bcma_host_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
|
||||
true);
|
||||
|
||||
/*
|
||||
|
|
|
@ -37,6 +37,8 @@
|
|||
#define BRCM_CC_43362_CHIP_ID 43362
|
||||
#define BRCM_CC_4335_CHIP_ID 0x4335
|
||||
#define BRCM_CC_4339_CHIP_ID 0x4339
|
||||
#define BRCM_CC_43430_CHIP_ID 43430
|
||||
#define BRCM_CC_4345_CHIP_ID 0x4345
|
||||
#define BRCM_CC_4354_CHIP_ID 0x4354
|
||||
#define BRCM_CC_4356_CHIP_ID 0x4356
|
||||
#define BRCM_CC_43566_CHIP_ID 43566
|
||||
|
|
|
@ -183,7 +183,14 @@ struct chipcregs {
|
|||
u8 uart1lsr;
|
||||
u8 uart1msr;
|
||||
u8 uart1scratch;
|
||||
u32 PAD[126];
|
||||
u32 PAD[62];
|
||||
|
||||
/* save/restore, corerev >= 48 */
|
||||
u32 sr_capability; /* 0x500 */
|
||||
u32 sr_control0; /* 0x504 */
|
||||
u32 sr_control1; /* 0x508 */
|
||||
u32 gpio_control; /* 0x50C */
|
||||
u32 PAD[60];
|
||||
|
||||
/* PMU registers (corerev >= 20) */
|
||||
u32 pmucontrol; /* 0x600 */
|
||||
|
|
|
@ -447,7 +447,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
|
||||
static int cw1200_spi_suspend(struct device *dev)
|
||||
{
|
||||
struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
|
||||
|
||||
|
@ -458,10 +458,8 @@ static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int cw1200_spi_resume(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
|
||||
|
||||
#endif
|
||||
|
||||
static struct spi_driver spi_driver = {
|
||||
|
@ -472,8 +470,7 @@ static struct spi_driver spi_driver = {
|
|||
.bus = &spi_bus_type,
|
||||
.owner = THIS_MODULE,
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = cw1200_spi_suspend,
|
||||
.resume = cw1200_spi_resume,
|
||||
.pm = &cw1200_pm_ops,
|
||||
#endif
|
||||
},
|
||||
};
|
||||
|
|
|
@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
|
||||
BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
|
||||
|
||||
if (vif)
|
||||
scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
|
||||
if (iwlagn_txfifo_flush(priv, scd_queues)) {
|
||||
IWL_ERR(priv, "flush request fail\n");
|
||||
goto done;
|
||||
if (drop) {
|
||||
IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
|
||||
scd_queues);
|
||||
if (iwlagn_txfifo_flush(priv, scd_queues)) {
|
||||
IWL_ERR(priv, "flush request fail\n");
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
|
||||
iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
|
||||
iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
|
||||
done:
|
||||
mutex_unlock(&priv->mutex);
|
||||
IWL_DEBUG_MAC80211(priv, "leave\n");
|
||||
|
|
|
@ -3153,12 +3153,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
|
|||
desc += sprintf(buff+desc, "lq type %s\n",
|
||||
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
|
||||
if (is_Ht(tbl->lq_type)) {
|
||||
desc += sprintf(buff+desc, " %s",
|
||||
desc += sprintf(buff + desc, " %s",
|
||||
(is_siso(tbl->lq_type)) ? "SISO" :
|
||||
((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
|
||||
desc += sprintf(buff+desc, " %s",
|
||||
desc += sprintf(buff + desc, " %s",
|
||||
(tbl->is_ht40) ? "40MHz" : "20MHz");
|
||||
desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
|
||||
desc += sprintf(buff + desc, " %s %s %s\n",
|
||||
(tbl->is_SGI) ? "SGI" : "",
|
||||
(lq_sta->is_green) ? "GF enabled" : "",
|
||||
(lq_sta->is_agg) ? "AGG on" : "");
|
||||
}
|
||||
|
|
|
@ -189,9 +189,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
|
|||
rate_flags |= RATE_MCS_CCK_MSK;
|
||||
|
||||
/* Set up antennas */
|
||||
if (priv->lib->bt_params &&
|
||||
priv->lib->bt_params->advanced_bt_coexist &&
|
||||
priv->bt_full_concurrent) {
|
||||
if (priv->lib->bt_params &&
|
||||
priv->lib->bt_params->advanced_bt_coexist &&
|
||||
priv->bt_full_concurrent) {
|
||||
/* operated as 1x1 in full concurrency mode */
|
||||
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
|
||||
first_antenna(priv->nvm_data->valid_tx_ant));
|
||||
|
|
|
@@ -69,12 +69,12 @@
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 12
#define IWL3160_UCODE_API_MAX 12
#define IWL7260_UCODE_API_MAX 13
#define IWL3160_UCODE_API_MAX 13

/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
#define IWL3160_UCODE_API_OK 10
#define IWL7260_UCODE_API_OK 12
#define IWL3160_UCODE_API_OK 12

/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 10
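The MAX/OK/MIN bounds above support the usual iwlwifi firmware selection scheme: request the newest supported API and walk downwards, warning when the firmware that is found is older than the OK level. A rough, self-contained sketch of that selection loop follows; request_fw_api() is a hypothetical stand-in for the real firmware request path.

/* Sketch of the firmware API selection logic implied by the MAX/OK/MIN
 * bounds above. request_fw_api() is a made-up placeholder. */
#include <stdbool.h>
#include <stdio.h>

#define UCODE_API_MAX 13
#define UCODE_API_OK  12
#define UCODE_API_MIN 10

static bool request_fw_api(int api)
{
    /* pretend only the API-12 firmware image is installed */
    return api == 12;
}

int main(void)
{
    for (int api = UCODE_API_MAX; api >= UCODE_API_MIN; api--) {
        if (!request_fw_api(api))
            continue;
        if (api < UCODE_API_OK)
            fprintf(stderr, "firmware API %d is older than recommended (%d)\n",
                api, UCODE_API_OK);
        printf("loaded firmware with API %d\n", api);
        return 0;
    }
    fprintf(stderr, "no usable firmware found (need API >= %d)\n", UCODE_API_MIN);
    return 1;
}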
@@ -69,10 +69,10 @@
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 12
#define IWL8000_UCODE_API_MAX 13

/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
#define IWL8000_UCODE_API_OK 12

/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 10
@@ -157,6 +157,7 @@ do { \
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
#define IWL_DL_DROP 0x00002000
#define IWL_DL_LAR 0x00004000
#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
#define IWL_DL_FW 0x00010000
@@ -219,5 +220,6 @@ do { \
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)

#endif
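IWL_DEBUG_LAR() plugs the new IWL_DL_LAR bit into the driver's level-mask logging scheme, where each subsystem macro tests its bit against a runtime debug_level before printing. The stand-alone sketch below shows that general pattern with illustrative names; it is not the driver's actual IWL_DEBUG implementation.

/* Generic sketch of a bitmask-gated debug macro; DBG_* and debug_level
 * are illustrative. */
#include <stdio.h>

#define DBG_ASSOC 0x00001000u
#define DBG_LAR   0x00004000u

static unsigned int debug_level = DBG_LAR; /* e.g. set from a module parameter */

#define DBG_PRINT(mask, fmt, ...)                                   \
    do {                                                            \
        if (debug_level & (mask))                                   \
            fprintf(stderr, "[%s] " fmt, #mask, ##__VA_ARGS__);     \
    } while (0)

int main(void)
{
    DBG_PRINT(DBG_LAR, "building regdom for %d channels\n", 51);
    DBG_PRINT(DBG_ASSOC, "this one is filtered out\n");
    return 0;
}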
@@ -1014,34 +1014,34 @@ static int validate_sec_sizes(struct iwl_drv *drv,

    /* Verify that uCode images will fit in card's SRAM. */
    if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
                            cfg->max_inst_size) {
        cfg->max_inst_size) {
        IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
            get_sec_size(pieces, IWL_UCODE_REGULAR,
                        IWL_UCODE_SECTION_INST));
                 IWL_UCODE_SECTION_INST));
        return -1;
    }

    if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
                            cfg->max_data_size) {
        cfg->max_data_size) {
        IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
            get_sec_size(pieces, IWL_UCODE_REGULAR,
                        IWL_UCODE_SECTION_DATA));
                 IWL_UCODE_SECTION_DATA));
        return -1;
    }

    if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
                            cfg->max_inst_size) {
    if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
        cfg->max_inst_size) {
        IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
            get_sec_size(pieces, IWL_UCODE_INIT,
                        IWL_UCODE_SECTION_INST));
                 IWL_UCODE_SECTION_INST));
        return -1;
    }

    if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
                            cfg->max_data_size) {
        cfg->max_data_size) {
        IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
            get_sec_size(pieces, IWL_UCODE_REGULAR,
                        IWL_UCODE_SECTION_DATA));
                 IWL_UCODE_SECTION_DATA));
        return -1;
    }
    return 0;
@@ -1546,6 +1546,10 @@ module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
           bool, S_IRUGO);
MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");

module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
           bool, S_IRUGO);
MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");

module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
           bool, S_IRUGO | S_IWUSR);
#ifdef CONFIG_IWLWIFI_UAPSD
@@ -68,7 +68,7 @@

/* for all modules */
#define DRV_NAME "iwlwifi"
#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"

/* radio config bits (actual values from NVM definition) */
@@ -94,6 +94,7 @@ struct iwl_nvm_data {
    u32 nvm_version;
    s8 max_tx_pwr_half_dbm;

    bool lar_enabled;
    struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
    struct ieee80211_channel channels[];
};
@@ -240,10 +240,9 @@ enum iwl_ucode_tlv_flag {
/**
 * enum iwl_ucode_tlv_api - ucode api
 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
 * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
 * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
 *    longer than the passive one, which is essential for fragmented scan.
 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
 *    regardless of the band or the number of the probes. FW will calculate
@@ -258,9 +257,8 @@ enum iwl_ucode_tlv_flag {
 */
enum iwl_ucode_tlv_api {
    IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
    IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
    IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
    IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
    IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
    IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
    IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
    IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
@@ -292,6 +290,7 @@ enum iwl_ucode_tlv_api {
 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
 * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
 * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
 */
enum iwl_ucode_tlv_capa {
    IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -308,6 +307,7 @@ enum iwl_ucode_tlv_capa {
    IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
    IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
    IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
    IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
};

/* The default calibrate table size if not specified by firmware file */
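These TLV bits are advertised by the firmware file and tested at runtime before a feature is enabled; the UMAC-scan check later in this diff (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) uses the same pattern. A reduced sketch of such a gate, with the surrounding structures trimmed to stand-ins, is below.

/* Sketch only: how a TLV API/capability bit typically gates a feature.
 * The structures are trimmed stand-ins, not the driver's real definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define TLV_API_WIFI_MCC_UPDATE BIT(9)

struct fw_capa {
    uint32_t api[1];    /* API bits parsed from the firmware TLVs */
    uint32_t capa[1];   /* capability bits */
};

static bool fw_has_api(const struct fw_capa *c, uint32_t bit)
{
    return c->api[0] & bit;
}

int main(void)
{
    struct fw_capa capa = { .api = { TLV_API_WIFI_MCC_UPDATE } };

    if (fw_has_api(&capa, TLV_API_WIFI_MCC_UPDATE))
        printf("firmware accepts MCC updates with a source id\n");
    else
        printf("fall back to the old MCC behaviour\n");
    return 0;
}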
@@ -201,6 +201,8 @@ void iwl_force_nmi(struct iwl_trans *trans)
    } else {
        iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
                   DEVICE_SET_NMI_8000B_VAL);
        iwl_write_prph(trans, DEVICE_SET_NMI_REG,
                   DEVICE_SET_NMI_VAL_DRV);
    }
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
@@ -103,6 +103,7 @@ enum iwl_disable_11n {
 * @debug_level: levels are IWL_DL_*
 * @ant_coupling: antenna coupling in dB, default = 0
 * @d0i3_disable: disable d0i3, default = 1,
 * @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
 */
struct iwl_mod_params {
@@ -121,6 +122,7 @@ struct iwl_mod_params {
    char *nvm_file;
    bool uapsd_disable;
    bool d0i3_disable;
    bool lar_disable;
    bool fw_monitor;
};
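Together with the module_param_named() hunk earlier, the new lar_disable field shows the usual pattern of backing a module parameter with a field in a driver-wide parameter struct. The generic module sketch below illustrates it (all names are placeholders); loading such a module with lar_disable=1 flips the flag.

/* Minimal kernel-module sketch, with made-up names, of a bool module
 * parameter backed by a params struct, as in the lar_disable hunks above. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

struct example_mod_params {
    bool lar_disable;   /* default false: LAR stays enabled */
};

static struct example_mod_params example_params;

module_param_named(lar_disable, example_params.lar_disable, bool, S_IRUGO);
MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");

static int __init example_init(void)
{
    pr_info("example_mod: LAR %s\n",
        example_params.lar_disable ? "disabled" : "enabled");
    return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");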
@ -103,8 +103,16 @@ enum family_8000_nvm_offsets {
|
|||
SKU_FAMILY_8000 = 4,
|
||||
N_HW_ADDRS_FAMILY_8000 = 5,
|
||||
|
||||
/* NVM PHY-SKU-Section offset (in words) for B0 */
|
||||
RADIO_CFG_FAMILY_8000_B0 = 0,
|
||||
SKU_FAMILY_8000_B0 = 2,
|
||||
N_HW_ADDRS_FAMILY_8000_B0 = 3,
|
||||
|
||||
/* NVM REGULATORY -Section offset (in words) definitions */
|
||||
NVM_CHANNELS_FAMILY_8000 = 0,
|
||||
NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
|
||||
NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
|
||||
NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
|
||||
|
||||
/* NVM calibration section offset (in words) definitions */
|
||||
NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
|
||||
|
@ -146,7 +154,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
|
|||
#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
|
||||
#define FIRST_2GHZ_HT_MINUS 5
|
||||
#define LAST_2GHZ_HT_PLUS 9
|
||||
#define LAST_5GHZ_HT 161
|
||||
#define LAST_5GHZ_HT 165
|
||||
#define LAST_5GHZ_HT_FAMILY_8000 181
|
||||
#define N_HW_ADDR_MASK 0xF
|
||||
|
||||
/* rate data (static) */
|
||||
static struct ieee80211_rate iwl_cfg80211_rates[] = {
|
||||
|
@ -201,9 +211,57 @@ enum iwl_nvm_channel_flags {
|
|||
#define CHECK_AND_PRINT_I(x) \
|
||||
((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
|
||||
|
||||
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
|
||||
u16 nvm_flags, const struct iwl_cfg *cfg)
|
||||
{
|
||||
u32 flags = IEEE80211_CHAN_NO_HT40;
|
||||
u32 last_5ghz_ht = LAST_5GHZ_HT;
|
||||
|
||||
if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
|
||||
|
||||
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if (ch_num <= LAST_2GHZ_HT_PLUS)
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
|
||||
if (ch_num >= FIRST_2GHZ_HT_MINUS)
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
|
||||
} else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
|
||||
else
|
||||
flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
|
||||
}
|
||||
if (!(nvm_flags & NVM_CHANNEL_80MHZ))
|
||||
flags |= IEEE80211_CHAN_NO_80MHZ;
|
||||
if (!(nvm_flags & NVM_CHANNEL_160MHZ))
|
||||
flags |= IEEE80211_CHAN_NO_160MHZ;
|
||||
|
||||
if (!(nvm_flags & NVM_CHANNEL_IBSS))
|
||||
flags |= IEEE80211_CHAN_NO_IR;
|
||||
|
||||
if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
|
||||
flags |= IEEE80211_CHAN_NO_IR;
|
||||
|
||||
if (nvm_flags & NVM_CHANNEL_RADAR)
|
||||
flags |= IEEE80211_CHAN_RADAR;
|
||||
|
||||
if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
|
||||
flags |= IEEE80211_CHAN_INDOOR_ONLY;
|
||||
|
||||
/* Set the GO concurrent flag only in case that NO_IR is set.
|
||||
* Otherwise it is meaningless
|
||||
*/
|
||||
if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
|
||||
(flags & IEEE80211_CHAN_NO_IR))
|
||||
flags |= IEEE80211_CHAN_GO_CONCURRENT;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
||||
struct iwl_nvm_data *data,
|
||||
const __le16 * const nvm_ch_flags)
|
||||
const __le16 * const nvm_ch_flags,
|
||||
bool lar_supported)
|
||||
{
|
||||
int ch_idx;
|
||||
int n_channels = 0;
|
||||
|
@ -228,9 +286,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
|||
|
||||
if (ch_idx >= num_2ghz_channels &&
|
||||
!data->sku_cap_band_52GHz_enable)
|
||||
ch_flags &= ~NVM_CHANNEL_VALID;
|
||||
continue;
|
||||
|
||||
if (!(ch_flags & NVM_CHANNEL_VALID)) {
|
||||
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
|
||||
/*
|
||||
* Channels might become valid later if lar is
|
||||
* supported, hence we still want to add them to
|
||||
* the list of supported channels to cfg80211.
|
||||
*/
|
||||
IWL_DEBUG_EEPROM(dev,
|
||||
"Ch. %d Flags %x [%sGHz] - No traffic\n",
|
||||
nvm_chan[ch_idx],
|
||||
|
@ -250,45 +313,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
|||
ieee80211_channel_to_frequency(
|
||||
channel->hw_value, channel->band);
|
||||
|
||||
/* TODO: Need to be dependent to the NVM */
|
||||
channel->flags = IEEE80211_CHAN_NO_HT40;
|
||||
if (ch_idx < num_2ghz_channels &&
|
||||
(ch_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
|
||||
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
|
||||
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
|
||||
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
|
||||
} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
|
||||
(ch_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if ((ch_idx - num_2ghz_channels) % 2 == 0)
|
||||
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
|
||||
else
|
||||
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
|
||||
}
|
||||
if (!(ch_flags & NVM_CHANNEL_80MHZ))
|
||||
channel->flags |= IEEE80211_CHAN_NO_80MHZ;
|
||||
if (!(ch_flags & NVM_CHANNEL_160MHZ))
|
||||
channel->flags |= IEEE80211_CHAN_NO_160MHZ;
|
||||
|
||||
if (!(ch_flags & NVM_CHANNEL_IBSS))
|
||||
channel->flags |= IEEE80211_CHAN_NO_IR;
|
||||
|
||||
if (!(ch_flags & NVM_CHANNEL_ACTIVE))
|
||||
channel->flags |= IEEE80211_CHAN_NO_IR;
|
||||
|
||||
if (ch_flags & NVM_CHANNEL_RADAR)
|
||||
channel->flags |= IEEE80211_CHAN_RADAR;
|
||||
|
||||
if (ch_flags & NVM_CHANNEL_INDOOR_ONLY)
|
||||
channel->flags |= IEEE80211_CHAN_INDOOR_ONLY;
|
||||
|
||||
/* Set the GO concurrent flag only in case that NO_IR is set.
|
||||
* Otherwise it is meaningless
|
||||
*/
|
||||
if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) &&
|
||||
(channel->flags & IEEE80211_CHAN_NO_IR))
|
||||
channel->flags |= IEEE80211_CHAN_GO_CONCURRENT;
|
||||
|
||||
/* Initialize regulatory-based run-time data */
|
||||
|
||||
/*
|
||||
|
@ -297,6 +321,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
|||
*/
|
||||
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
|
||||
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
|
||||
|
||||
/* don't put limitations in case we're using LAR */
|
||||
if (!lar_supported)
|
||||
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
|
||||
ch_idx, is_5ghz,
|
||||
ch_flags, cfg);
|
||||
else
|
||||
channel->flags = 0;
|
||||
|
||||
IWL_DEBUG_EEPROM(dev,
|
||||
"Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
|
||||
channel->hw_value,
|
||||
|
@ -370,8 +403,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
|
|||
|
||||
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
|
||||
struct iwl_nvm_data *data,
|
||||
const __le16 *ch_section, bool enable_vht,
|
||||
u8 tx_chains, u8 rx_chains)
|
||||
const __le16 *ch_section,
|
||||
u8 tx_chains, u8 rx_chains, bool lar_supported)
|
||||
{
|
||||
int n_channels;
|
||||
int n_used = 0;
|
||||
|
@ -380,11 +413,12 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
|
|||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
n_channels = iwl_init_channel_map(
|
||||
dev, cfg, data,
|
||||
&ch_section[NVM_CHANNELS]);
|
||||
&ch_section[NVM_CHANNELS], lar_supported);
|
||||
else
|
||||
n_channels = iwl_init_channel_map(
|
||||
dev, cfg, data,
|
||||
&ch_section[NVM_CHANNELS_FAMILY_8000]);
|
||||
&ch_section[NVM_CHANNELS_FAMILY_8000],
|
||||
lar_supported);
|
||||
|
||||
sband = &data->bands[IEEE80211_BAND_2GHZ];
|
||||
sband->band = IEEE80211_BAND_2GHZ;
|
||||
|
@ -403,7 +437,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
|
|||
IEEE80211_BAND_5GHZ);
|
||||
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
|
||||
tx_chains, rx_chains);
|
||||
if (enable_vht)
|
||||
if (data->sku_cap_11ac_enable)
|
||||
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
|
||||
tx_chains, rx_chains);
|
||||
|
||||
|
@ -413,10 +447,15 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
|
|||
}
|
||||
|
||||
static int iwl_get_sku(const struct iwl_cfg *cfg,
|
||||
const __le16 *nvm_sw)
|
||||
const __le16 *nvm_sw, const __le16 *phy_sku,
|
||||
bool is_family_8000_a_step)
|
||||
{
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
return le16_to_cpup(nvm_sw + SKU);
|
||||
|
||||
if (!is_family_8000_a_step)
|
||||
return le32_to_cpup((__le32 *)(phy_sku +
|
||||
SKU_FAMILY_8000_B0));
|
||||
else
|
||||
return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
|
||||
}
|
||||
|
@ -432,23 +471,36 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
|
|||
}
|
||||
|
||||
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
|
||||
const __le16 *nvm_sw)
|
||||
const __le16 *nvm_sw, const __le16 *phy_sku,
|
||||
bool is_family_8000_a_step)
|
||||
{
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
return le16_to_cpup(nvm_sw + RADIO_CFG);
|
||||
|
||||
if (!is_family_8000_a_step)
|
||||
return le32_to_cpup((__le32 *)(phy_sku +
|
||||
RADIO_CFG_FAMILY_8000_B0));
|
||||
else
|
||||
return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
|
||||
|
||||
}
|
||||
|
||||
#define N_HW_ADDRS_MASK_FAMILY_8000 0xF
|
||||
static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
|
||||
const __le16 *nvm_sw)
|
||||
const __le16 *nvm_sw, bool is_family_8000_a_step)
|
||||
{
|
||||
int n_hw_addr;
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
|
||||
|
||||
if (!is_family_8000_a_step)
|
||||
n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw +
|
||||
N_HW_ADDRS_FAMILY_8000_B0));
|
||||
else
|
||||
return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
|
||||
& N_HW_ADDRS_MASK_FAMILY_8000;
|
||||
n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw +
|
||||
N_HW_ADDRS_FAMILY_8000));
|
||||
|
||||
return n_hw_addr & N_HW_ADDR_MASK;
|
||||
}
|
||||
|
||||
static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
|
||||
|
@ -491,7 +543,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
|
|||
const struct iwl_cfg *cfg,
|
||||
struct iwl_nvm_data *data,
|
||||
const __le16 *mac_override,
|
||||
const __le16 *nvm_hw)
|
||||
const __le16 *nvm_hw,
|
||||
u32 mac_addr0, u32 mac_addr1)
|
||||
{
|
||||
const u8 *hw_addr;
|
||||
|
||||
|
@ -515,48 +568,17 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
|
|||
}
|
||||
|
||||
if (nvm_hw) {
|
||||
/* read the MAC address from OTP */
|
||||
if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) {
|
||||
/* read the mac address from the WFPM location */
|
||||
hw_addr = (const u8 *)(nvm_hw +
|
||||
HW_ADDR0_WFPM_FAMILY_8000);
|
||||
data->hw_addr[0] = hw_addr[3];
|
||||
data->hw_addr[1] = hw_addr[2];
|
||||
data->hw_addr[2] = hw_addr[1];
|
||||
data->hw_addr[3] = hw_addr[0];
|
||||
/* read the MAC address from HW resisters */
|
||||
hw_addr = (const u8 *)&mac_addr0;
|
||||
data->hw_addr[0] = hw_addr[3];
|
||||
data->hw_addr[1] = hw_addr[2];
|
||||
data->hw_addr[2] = hw_addr[1];
|
||||
data->hw_addr[3] = hw_addr[0];
|
||||
|
||||
hw_addr = (const u8 *)(nvm_hw +
|
||||
HW_ADDR1_WFPM_FAMILY_8000);
|
||||
data->hw_addr[4] = hw_addr[1];
|
||||
data->hw_addr[5] = hw_addr[0];
|
||||
} else if ((data->nvm_version >= 0xE08) &&
|
||||
(data->nvm_version < 0xE0B)) {
|
||||
/* read "reverse order" from the PCIe location */
|
||||
hw_addr = (const u8 *)(nvm_hw +
|
||||
HW_ADDR0_PCIE_FAMILY_8000);
|
||||
data->hw_addr[5] = hw_addr[2];
|
||||
data->hw_addr[4] = hw_addr[1];
|
||||
data->hw_addr[3] = hw_addr[0];
|
||||
hw_addr = (const u8 *)&mac_addr1;
|
||||
data->hw_addr[4] = hw_addr[1];
|
||||
data->hw_addr[5] = hw_addr[0];
|
||||
|
||||
hw_addr = (const u8 *)(nvm_hw +
|
||||
HW_ADDR1_PCIE_FAMILY_8000);
|
||||
data->hw_addr[2] = hw_addr[3];
|
||||
data->hw_addr[1] = hw_addr[2];
|
||||
data->hw_addr[0] = hw_addr[1];
|
||||
} else {
|
||||
/* read from the PCIe location */
|
||||
hw_addr = (const u8 *)(nvm_hw +
|
||||
HW_ADDR0_PCIE_FAMILY_8000);
|
||||
data->hw_addr[5] = hw_addr[0];
|
||||
data->hw_addr[4] = hw_addr[1];
|
||||
data->hw_addr[3] = hw_addr[2];
|
||||
|
||||
hw_addr = (const u8 *)(nvm_hw +
|
||||
HW_ADDR1_PCIE_FAMILY_8000);
|
||||
data->hw_addr[2] = hw_addr[1];
|
||||
data->hw_addr[1] = hw_addr[2];
|
||||
data->hw_addr[0] = hw_addr[3];
|
||||
}
|
||||
if (!is_valid_ether_addr(data->hw_addr))
|
||||
IWL_ERR_DEV(dev,
|
||||
"mac address from hw section is not valid\n");
|
||||
|
@ -571,11 +593,15 @@ struct iwl_nvm_data *
|
|||
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
|
||||
const __le16 *nvm_hw, const __le16 *nvm_sw,
|
||||
const __le16 *nvm_calib, const __le16 *regulatory,
|
||||
const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
|
||||
const __le16 *mac_override, const __le16 *phy_sku,
|
||||
u8 tx_chains, u8 rx_chains,
|
||||
bool lar_fw_supported, bool is_family_8000_a_step,
|
||||
u32 mac_addr0, u32 mac_addr1)
|
||||
{
|
||||
struct iwl_nvm_data *data;
|
||||
u32 sku;
|
||||
u32 radio_cfg;
|
||||
u16 lar_config;
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
data = kzalloc(sizeof(*data) +
|
||||
|
@ -592,22 +618,25 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
|
|||
|
||||
data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
|
||||
|
||||
radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
|
||||
radio_cfg =
|
||||
iwl_get_radio_cfg(cfg, nvm_sw, phy_sku, is_family_8000_a_step);
|
||||
iwl_set_radio_cfg(cfg, data, radio_cfg);
|
||||
if (data->valid_tx_ant)
|
||||
tx_chains &= data->valid_tx_ant;
|
||||
if (data->valid_rx_ant)
|
||||
rx_chains &= data->valid_rx_ant;
|
||||
|
||||
sku = iwl_get_sku(cfg, nvm_sw);
|
||||
sku = iwl_get_sku(cfg, nvm_sw, phy_sku, is_family_8000_a_step);
|
||||
data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
|
||||
data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
|
||||
data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
|
||||
data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
|
||||
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
|
||||
data->sku_cap_11n_enable = false;
|
||||
data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
|
||||
(sku & NVM_SKU_CAP_11AC_ENABLE);
|
||||
|
||||
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
|
||||
data->n_hw_addrs =
|
||||
iwl_get_n_hw_addrs(cfg, nvm_sw, is_family_8000_a_step);
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
|
||||
/* Checking for required sections */
|
||||
|
@ -626,16 +655,23 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
|
|||
iwl_set_hw_address(cfg, data, nvm_hw);
|
||||
|
||||
iwl_init_sbands(dev, cfg, data, nvm_sw,
|
||||
sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
|
||||
rx_chains);
|
||||
tx_chains, rx_chains, lar_fw_supported);
|
||||
} else {
|
||||
u16 lar_offset = data->nvm_version < 0xE39 ?
|
||||
NVM_LAR_OFFSET_FAMILY_8000_OLD :
|
||||
NVM_LAR_OFFSET_FAMILY_8000;
|
||||
|
||||
lar_config = le16_to_cpup(regulatory + lar_offset);
|
||||
data->lar_enabled = !!(lar_config &
|
||||
NVM_LAR_ENABLED_FAMILY_8000);
|
||||
|
||||
/* MAC address in family 8000 */
|
||||
iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
|
||||
nvm_hw);
|
||||
nvm_hw, mac_addr0, mac_addr1);
|
||||
|
||||
iwl_init_sbands(dev, cfg, data, regulatory,
|
||||
sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
|
||||
rx_chains);
|
||||
tx_chains, rx_chains,
|
||||
lar_fw_supported && data->lar_enabled);
|
||||
}
|
||||
|
||||
data->calib_version = 255;
|
||||
|
@ -643,3 +679,164 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
|
|||
return data;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
|
||||
|
||||
static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
|
||||
int ch_idx, u16 nvm_flags,
|
||||
const struct iwl_cfg *cfg)
|
||||
{
|
||||
u32 flags = NL80211_RRF_NO_HT40;
|
||||
u32 last_5ghz_ht = LAST_5GHZ_HT;
|
||||
|
||||
if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
|
||||
|
||||
if (ch_idx < NUM_2GHZ_CHANNELS &&
|
||||
(nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
|
||||
flags &= ~NL80211_RRF_NO_HT40PLUS;
|
||||
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
|
||||
flags &= ~NL80211_RRF_NO_HT40MINUS;
|
||||
} else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
|
||||
(nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
|
||||
flags &= ~NL80211_RRF_NO_HT40PLUS;
|
||||
else
|
||||
flags &= ~NL80211_RRF_NO_HT40MINUS;
|
||||
}
|
||||
|
||||
if (!(nvm_flags & NVM_CHANNEL_80MHZ))
|
||||
flags |= NL80211_RRF_NO_80MHZ;
|
||||
if (!(nvm_flags & NVM_CHANNEL_160MHZ))
|
||||
flags |= NL80211_RRF_NO_160MHZ;
|
||||
|
||||
if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
|
||||
flags |= NL80211_RRF_NO_IR;
|
||||
|
||||
if (nvm_flags & NVM_CHANNEL_RADAR)
|
||||
flags |= NL80211_RRF_DFS;
|
||||
|
||||
if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
|
||||
flags |= NL80211_RRF_NO_OUTDOOR;
|
||||
|
||||
/* Set the GO concurrent flag only in case that NO_IR is set.
|
||||
* Otherwise it is meaningless
|
||||
*/
|
||||
if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
|
||||
(flags & NL80211_RRF_NO_IR))
|
||||
flags |= NL80211_RRF_GO_CONCURRENT;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
struct ieee80211_regdomain *
|
||||
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
|
||||
int num_of_ch, __le32 *channels, u16 fw_mcc)
|
||||
{
|
||||
int ch_idx;
|
||||
u16 ch_flags, prev_ch_flags = 0;
|
||||
const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
|
||||
iwl_nvm_channels_family_8000 : iwl_nvm_channels;
|
||||
struct ieee80211_regdomain *regd;
|
||||
int size_of_regd;
|
||||
struct ieee80211_reg_rule *rule;
|
||||
enum ieee80211_band band;
|
||||
int center_freq, prev_center_freq = 0;
|
||||
int valid_rules = 0;
|
||||
bool new_rule;
|
||||
int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
|
||||
IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
|
||||
|
||||
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (WARN_ON(num_of_ch > max_num_ch))
|
||||
num_of_ch = max_num_ch;
|
||||
|
||||
IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
|
||||
num_of_ch);
|
||||
|
||||
/* build a regdomain rule for every valid channel */
|
||||
size_of_regd =
|
||||
sizeof(struct ieee80211_regdomain) +
|
||||
num_of_ch * sizeof(struct ieee80211_reg_rule);
|
||||
|
||||
regd = kzalloc(size_of_regd, GFP_KERNEL);
|
||||
if (!regd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
|
||||
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
|
||||
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
|
||||
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
|
||||
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
|
||||
band);
|
||||
new_rule = false;
|
||||
|
||||
if (!(ch_flags & NVM_CHANNEL_VALID)) {
|
||||
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
|
||||
"Ch. %d Flags %x [%sGHz] - No traffic\n",
|
||||
nvm_chan[ch_idx],
|
||||
ch_flags,
|
||||
(ch_idx >= NUM_2GHZ_CHANNELS) ?
|
||||
"5.2" : "2.4");
|
||||
continue;
|
||||
}
|
||||
|
||||
/* we can't continue the same rule */
|
||||
if (ch_idx == 0 || prev_ch_flags != ch_flags ||
|
||||
center_freq - prev_center_freq > 20) {
|
||||
valid_rules++;
|
||||
new_rule = true;
|
||||
}
|
||||
|
||||
rule = ®d->reg_rules[valid_rules - 1];
|
||||
|
||||
if (new_rule)
|
||||
rule->freq_range.start_freq_khz =
|
||||
MHZ_TO_KHZ(center_freq - 10);
|
||||
|
||||
rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
|
||||
|
||||
/* this doesn't matter - not used by FW */
|
||||
rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
|
||||
rule->power_rule.max_eirp =
|
||||
DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
|
||||
|
||||
rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
|
||||
ch_flags, cfg);
|
||||
|
||||
/* rely on auto-calculation to merge BW of contiguous chans */
|
||||
rule->flags |= NL80211_RRF_AUTO_BW;
|
||||
rule->freq_range.max_bandwidth_khz = 0;
|
||||
|
||||
prev_ch_flags = ch_flags;
|
||||
prev_center_freq = center_freq;
|
||||
|
||||
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
|
||||
"Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
|
||||
center_freq,
|
||||
band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
|
||||
CHECK_AND_PRINT_I(VALID),
|
||||
CHECK_AND_PRINT_I(ACTIVE),
|
||||
CHECK_AND_PRINT_I(RADAR),
|
||||
CHECK_AND_PRINT_I(WIDE),
|
||||
CHECK_AND_PRINT_I(40MHZ),
|
||||
CHECK_AND_PRINT_I(80MHZ),
|
||||
CHECK_AND_PRINT_I(160MHZ),
|
||||
CHECK_AND_PRINT_I(INDOOR_ONLY),
|
||||
CHECK_AND_PRINT_I(GO_CONCURRENT),
|
||||
ch_flags,
|
||||
((ch_flags & NVM_CHANNEL_ACTIVE) &&
|
||||
!(ch_flags & NVM_CHANNEL_RADAR))
|
||||
? "" : "not ");
|
||||
}
|
||||
|
||||
regd->n_reg_rules = valid_rules;
|
||||
|
||||
/* set alpha2 from FW. */
|
||||
regd->alpha2[0] = fw_mcc >> 8;
|
||||
regd->alpha2[1] = fw_mcc & 0xff;
|
||||
|
||||
return regd;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
|
||||
|
|
|
@@ -62,6 +62,7 @@
#ifndef __iwl_nvm_parse_h__
#define __iwl_nvm_parse_h__

#include <net/cfg80211.h>
#include "iwl-eeprom-parse.h"

/**
@@ -76,6 +77,22 @@ struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
           const __le16 *nvm_hw, const __le16 *nvm_sw,
           const __le16 *nvm_calib, const __le16 *regulatory,
           const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
           const __le16 *mac_override, const __le16 *phy_sku,
           u8 tx_chains, u8 rx_chains,
           bool lar_fw_supported, bool is_family_8000_a_step,
           u32 mac_addr0, u32 mac_addr1);

/**
 * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
 *
 * This function parses the regulatory channel data received as a
 * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain,
 * to be fed into the regulatory core. An ERR_PTR is returned on error.
 * If not given to the regulatory core, the user is responsible for freeing
 * the regdomain returned here with kfree.
 */
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
               int num_of_ch, __le32 *channels, u16 fw_mcc);

#endif /* __iwl_nvm_parse_h__ */
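The comment above fixes the ownership rule for the regdomain returned by iwl_parse_nvm_mcc_info(): hand it to the regulatory core or kfree() it yourself. The mac80211.c hunks later in this diff follow that rule; the trimmed sketch below (example_apply_mcc_regd is a hypothetical helper, and the mvm driver headers are assumed to be in scope) shows the call pattern.

/* Sketch: consuming the regdomain that iwl_parse_nvm_mcc_info() builds from
 * an MCC_UPDATE_CMD response. Assumes the iwlwifi/mvm headers are included. */
static int example_apply_mcc_regd(struct device *dev, const struct iwl_cfg *cfg,
                  struct wiphy *wiphy,
                  struct iwl_mcc_update_resp *resp)
{
    struct ieee80211_regdomain *regd;
    int ret;

    regd = iwl_parse_nvm_mcc_info(dev, cfg,
                      __le32_to_cpu(resp->n_channels),
                      resp->channels,
                      __le16_to_cpu(resp->mcc));
    if (IS_ERR_OR_NULL(regd))
        return regd ? PTR_ERR(regd) : -EINVAL;

    /* the regulatory core copies what it needs, so the regdomain is
     * freed here either way, matching the kfree() rule stated above */
    ret = regulatory_set_wiphy_regd(wiphy, regd);
    kfree(regd);
    return ret;
}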
@@ -371,6 +371,33 @@ enum secure_load_status_reg {

#define DBGC_IN_SAMPLE (0xa03c00)

/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
#define WFMP_MAC_ADDR_0 0xA03080
#define WFMP_MAC_ADDR_1 0xA03084
#define LMPM_PMG_EN 0xA01CEC
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
enum {
    ENABLE_WFPM = BIT(31),
    WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
};

#define AUX_MISC_REG 0xA200B0
enum {
    HW_STEP_LOCATION_BITS = 24,
};

#define AUX_MISC_MASTER1_EN 0xA20818
enum aux_misc_master1_en {
    AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
};

#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0

/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
enum {
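The new WFMP_MAC_ADDR_0/1 registers feed the family-8000 MAC address path earlier in this diff, where two 32-bit words are byte-swizzled into hw_addr. A small host-side sketch of that swizzle, using made-up register values, follows.

/* Sketch of turning two 32-bit MAC-address registers into a 6-byte address,
 * following the byte order used in the family-8000 hunk above
 * (hw_addr[0..3] from the first word, hw_addr[4..5] from the second).
 * A little-endian host is assumed, as on the PCIe bus. */
#include <stdint.h>
#include <stdio.h>

static void mac_from_regs(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t out[6])
{
    const uint8_t *w0 = (const uint8_t *)&mac_addr0;
    const uint8_t *w1 = (const uint8_t *)&mac_addr1;

    out[0] = w0[3];
    out[1] = w0[2];
    out[2] = w0[1];
    out[3] = w0[0];
    out[4] = w1[1];
    out[5] = w1[0];
}

int main(void)
{
    uint8_t mac[6];

    /* made-up register contents, purely for illustration */
    mac_from_regs(0x33221100u, 0x00005544u, mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}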
@@ -458,6 +458,8 @@ struct iwl_trans_txq_scd_cfg {
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *    Must be atomic
 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *    queue is set to awake. Must be atomic.
 * @dbgfs_register: add the dbgfs files under this directory. Files will be
 *    automatically deleted.
 * @write8: write a u8 to a register at offset ofs from the BAR
@@ -517,6 +519,8 @@ struct iwl_trans_ops {

    int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
    int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
    void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
                 bool freeze);

    void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
    void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -873,6 +877,17 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
    iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
                          unsigned long txqs,
                          bool freeze)
{
    if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
        IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

    if (trans->ops->freeze_txq_timer)
        trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
                        u32 txqs)
{
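The inline wrapper above reflects the transport-ops convention used throughout this header: callers go through a function-pointer table, and a NULL slot means the transport does not support the operation. A reduced, compilable sketch of that convention with generic names is below.

/* Sketch of an ops table with an optional callback, mirroring the
 * freeze_txq_timer wrapper above; names and types are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct trans;

struct trans_ops {
    /* mandatory op */
    int (*wait_tx_queue_empty)(struct trans *t, unsigned int txq_bm);
    /* optional op: may be NULL on transports that lack the feature */
    void (*freeze_txq_timer)(struct trans *t, unsigned long txqs, bool freeze);
};

struct trans {
    const struct trans_ops *ops;
};

static void trans_freeze_txq_timer(struct trans *t, unsigned long txqs, bool freeze)
{
    if (t->ops->freeze_txq_timer)
        t->ops->freeze_txq_timer(t, txqs, freeze);
    /* silently a no-op otherwise, exactly like the wrapper in the hunk */
}

static int pcie_wait_empty(struct trans *t, unsigned int txq_bm)
{
    (void)t;
    printf("waiting for queues 0x%x\n", txq_bm);
    return 0;
}

static const struct trans_ops pcie_ops = {
    .wait_tx_queue_empty = pcie_wait_empty,
    /* .freeze_txq_timer intentionally left NULL */
};

int main(void)
{
    struct trans t = { .ops = &pcie_ops };

    trans_freeze_txq_timer(&t, 0x3, true); /* no-op */
    return t.ops->wait_tx_queue_empty(&t, 0xffffffffu);
}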
@ -72,158 +72,6 @@
|
|||
#include "mvm.h"
|
||||
#include "iwl-debug.h"
|
||||
|
||||
const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
|
||||
[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
|
||||
[BT_KILL_MSK_NEVER] = 0xffffffff,
|
||||
[BT_KILL_MSK_ALWAYS] = 0,
|
||||
};
|
||||
|
||||
const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
},
|
||||
};
|
||||
|
||||
const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
},
|
||||
};
|
||||
|
||||
static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
|
||||
cpu_to_le32(0xf0f0f0f0), /* 50% */
|
||||
cpu_to_le32(0xc0c0c0c0), /* 25% */
|
||||
cpu_to_le32(0xfcfcfcfc), /* 75% */
|
||||
cpu_to_le32(0xfefefefe), /* 87.5% */
|
||||
};
|
||||
|
||||
static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
|
||||
{
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
},
|
||||
{
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
},
|
||||
{
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
},
|
||||
};
|
||||
|
||||
static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
|
||||
{
|
||||
/* Tight */
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xaeaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xcc00ff28),
|
||||
cpu_to_le32(0x0000aaaa),
|
||||
cpu_to_le32(0xcc00aaaa),
|
||||
cpu_to_le32(0x0000aaaa),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0x00004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
},
|
||||
{
|
||||
/* Loose */
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xcc00ff28),
|
||||
cpu_to_le32(0x0000aaaa),
|
||||
cpu_to_le32(0xcc00aaaa),
|
||||
cpu_to_le32(0x0000aaaa),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
},
|
||||
{
|
||||
/* Tx Tx disabled */
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xeeaaaaaa),
|
||||
cpu_to_le32(0xaaaaaaaa),
|
||||
cpu_to_le32(0xcc00ff28),
|
||||
cpu_to_le32(0x0000aaaa),
|
||||
cpu_to_le32(0xcc00aaaa),
|
||||
cpu_to_le32(0x0000aaaa),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xc0004000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
cpu_to_le32(0xf0005000),
|
||||
},
|
||||
};
|
||||
|
||||
/* 20MHz / 40MHz below / 40Mhz above*/
|
||||
static const __le64 iwl_ci_mask[][3] = {
|
||||
/* dummy entry for channel 0 */
|
||||
|
@ -596,14 +444,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
|||
goto send_cmd;
|
||||
}
|
||||
|
||||
bt_cmd->max_kill = cpu_to_le32(5);
|
||||
bt_cmd->bt4_antenna_isolation_thr =
|
||||
cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
|
||||
bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
|
||||
bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
|
||||
bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
|
||||
bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
|
||||
|
||||
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
|
||||
bt_cmd->mode = cpu_to_le32(mode);
|
||||
|
||||
|
@ -622,18 +462,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
|||
|
||||
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
|
||||
|
||||
if (mvm->cfg->bt_shared_single_ant)
|
||||
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
|
||||
sizeof(iwl_single_shared_ant));
|
||||
else
|
||||
memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
|
||||
sizeof(iwl_combined_lookup));
|
||||
|
||||
memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
|
||||
sizeof(iwl_bt_prio_boost));
|
||||
bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
|
||||
bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
|
||||
|
||||
send_cmd:
|
||||
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
|
||||
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
|
||||
|
@ -644,48 +472,6 @@ send_cmd:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
|
||||
u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
|
||||
u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut);
|
||||
u32 ag = le32_to_cpu(notif->bt_activity_grading);
|
||||
struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
|
||||
u8 ack_kill_msk[NUM_PHY_CTX] = {};
|
||||
u8 cts_kill_msk[NUM_PHY_CTX] = {};
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut];
|
||||
cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut];
|
||||
|
||||
ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut];
|
||||
cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut];
|
||||
|
||||
/* Don't send HCMD if there is no update */
|
||||
if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) ||
|
||||
!memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk)))
|
||||
return 0;
|
||||
|
||||
memcpy(mvm->bt_ack_kill_msk, ack_kill_msk,
|
||||
sizeof(mvm->bt_ack_kill_msk));
|
||||
memcpy(mvm->bt_cts_kill_msk, cts_kill_msk,
|
||||
sizeof(mvm->bt_cts_kill_msk));
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) {
|
||||
cmd.boost_values[i].kill_ack_msk =
|
||||
cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]);
|
||||
cmd.boost_values[i].kill_cts_msk =
|
||||
cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]);
|
||||
}
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
|
||||
sizeof(cmd), &cmd);
|
||||
}
|
||||
|
||||
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
|
||||
bool enable)
|
||||
{
|
||||
|
@ -951,9 +737,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
|
|||
IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
|
||||
memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
|
||||
}
|
||||
|
||||
if (iwl_mvm_bt_udpate_sw_boost(mvm))
|
||||
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
|
||||
}
|
||||
|
||||
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
|
||||
|
@ -1074,9 +857,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
ieee80211_iterate_active_interfaces_atomic(
|
||||
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
|
||||
iwl_mvm_bt_rssi_iterator, &data);
|
||||
|
||||
if (iwl_mvm_bt_udpate_sw_boost(mvm))
|
||||
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
|
||||
}
|
||||
|
||||
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
|
||||
|
|
|
@ -288,6 +288,65 @@ static const __le64 iwl_ci_mask[][3] = {
|
|||
},
|
||||
};
|
||||
|
||||
enum iwl_bt_kill_msk {
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_MAX,
|
||||
};
|
||||
|
||||
static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
|
||||
[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
|
||||
[BT_KILL_MSK_NEVER] = 0xffffffff,
|
||||
[BT_KILL_MSK_ALWAYS] = 0,
|
||||
};
|
||||
|
||||
static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_NEVER,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
BT_KILL_MSK_NEVER,
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
},
|
||||
};
|
||||
|
||||
static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
},
|
||||
{
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
BT_KILL_MSK_ALWAYS,
|
||||
BT_KILL_MSK_DEFAULT,
|
||||
},
|
||||
};
|
||||
|
||||
struct corunning_block_luts {
|
||||
u8 range;
|
||||
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
|
||||
|
@ -633,7 +692,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
|
|||
if (IWL_MVM_BT_COEX_TTC)
|
||||
bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
|
||||
|
||||
if (IWL_MVM_BT_COEX_RRC)
|
||||
if (iwl_mvm_bt_is_rrc_supported(mvm))
|
||||
bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
|
||||
|
||||
if (mvm->cfg->bt_shared_single_ant)
|
||||
|
|
|
@ -694,6 +694,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
|
||||
|
||||
if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
|
||||
IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1596,7 +1599,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
/* RF-kill already asserted again... */
|
||||
if (!cmd.resp_pkt) {
|
||||
ret = -ERFKILL;
|
||||
fw_status = ERR_PTR(-ERFKILL);
|
||||
goto out_free_resp;
|
||||
}
|
||||
|
||||
|
@ -1605,7 +1608,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
|
||||
if (len < status_size) {
|
||||
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
|
||||
ret = -EIO;
|
||||
fw_status = ERR_PTR(-EIO);
|
||||
goto out_free_resp;
|
||||
}
|
||||
|
||||
|
@ -1613,7 +1616,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
if (len != (status_size +
|
||||
ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
|
||||
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
|
||||
ret = -EIO;
|
||||
fw_status = ERR_PTR(-EIO);
|
||||
goto out_free_resp;
|
||||
}
|
||||
|
||||
|
@ -1621,7 +1624,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
out_free_resp:
|
||||
iwl_free_resp(&cmd);
|
||||
return ret ? ERR_PTR(ret) : fw_status;
|
||||
return fw_status;
|
||||
}
|
||||
|
||||
/* releases the MVM mutex */
|
||||
|
@ -1874,6 +1877,12 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
|
|||
/* query SRAM first in case we want event logging */
|
||||
iwl_mvm_read_d3_sram(mvm);
|
||||
|
||||
/*
|
||||
* Query the current location and source from the D3 firmware so we
|
||||
* can play it back when we re-intiailize the D0 firmware
|
||||
*/
|
||||
iwl_mvm_update_changed_regdom(mvm);
|
||||
|
||||
if (mvm->net_detect) {
|
||||
iwl_mvm_query_netdetect_reasons(mvm, vif);
|
||||
/* has unlocked the mutex, so skip that */
|
||||
|
@ -1883,9 +1892,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
|
|||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
if (keep)
|
||||
mvm->keep_vif = vif;
|
||||
#endif
|
||||
/* has unlocked the mutex, so skip that */
|
||||
goto out_iterate;
|
||||
#endif
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
|
|
|
@ -562,11 +562,12 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
|
|||
"\tSecondary Channel Bitmap 0x%016llx\n",
|
||||
le64_to_cpu(cmd->bt_secondary_ci));
|
||||
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
|
||||
iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
|
||||
iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"BT Configuration CMD - 0=default, 1=never, 2=always\n");
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
|
||||
mvm->bt_ack_kill_msk[0]);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
|
||||
mvm->bt_cts_kill_msk[0]);
|
||||
|
||||
} else {
|
||||
struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
|
||||
|
@ -579,21 +580,6 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
|
|||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"\tSecondary Channel Bitmap 0x%016llx\n",
|
||||
le64_to_cpu(cmd->bt_secondary_ci));
|
||||
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"\tPrimary: ACK Kill Mask 0x%08x\n",
|
||||
iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"\tPrimary: CTS Kill Mask 0x%08x\n",
|
||||
iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"\tSecondary: ACK Kill Mask 0x%08x\n",
|
||||
iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]);
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"\tSecondary: CTS Kill Mask 0x%08x\n",
|
||||
iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]);
|
||||
|
||||
}
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
|
|
@ -235,36 +235,12 @@ enum iwl_bt_coex_enabled_modules {
|
|||
* struct iwl_bt_coex_cmd - bt coex configuration command
|
||||
* @mode: enum %iwl_bt_coex_mode
|
||||
* @enabled_modules: enum %iwl_bt_coex_enabled_modules
|
||||
* @max_kill: max count of Tx retries due to kill from PTA
|
||||
* @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
|
||||
* should be set by default
|
||||
* @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
|
||||
* should be set by default
|
||||
* @bt4_antenna_isolation_thr: antenna threshold value
|
||||
* @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
|
||||
* @bt4_tx_rx_max_freq0: TxRx max frequency
|
||||
* @multiprio_lut: multi priority LUT configuration
|
||||
* @mplut_prio_boost: BT priority boost registers
|
||||
* @decision_lut: PTA decision LUT, per Prio-Ch
|
||||
*
|
||||
* The structure is used for the BT_COEX command.
|
||||
*/
|
||||
struct iwl_bt_coex_cmd {
|
||||
__le32 mode;
|
||||
__le32 enabled_modules;
|
||||
|
||||
__le32 max_kill;
|
||||
__le32 override_primary_lut;
|
||||
__le32 override_secondary_lut;
|
||||
__le32 bt4_antenna_isolation_thr;
|
||||
|
||||
__le32 bt4_tx_tx_delta_freq_thr;
|
||||
__le32 bt4_tx_rx_max_freq0;
|
||||
|
||||
__le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
|
||||
__le32 mplut_prio_boost[BT_COEX_BOOST_SIZE];
|
||||
|
||||
__le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
|
||||
} __packed; /* BT_COEX_CMD_API_S_VER_6 */
|
||||
|
||||
/**
|
||||
|
@ -279,29 +255,6 @@ struct iwl_bt_coex_corun_lut_update_cmd {
|
|||
__le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE];
|
||||
} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_bt_coex_sw_boost - SW boost values
|
||||
* @wifi_tx_prio_boost: SW boost of wifi tx priority
|
||||
* @wifi_rx_prio_boost: SW boost of wifi rx priority
|
||||
* @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
|
||||
* @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
|
||||
*/
|
||||
struct iwl_bt_coex_sw_boost {
|
||||
__le32 wifi_tx_prio_boost;
|
||||
__le32 wifi_rx_prio_boost;
|
||||
__le32 kill_ack_msk;
|
||||
__le32 kill_cts_msk;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost
|
||||
* @boost_values: check struct %iwl_bt_coex_sw_boost - one for each channel
|
||||
* primary / secondary / low priority
|
||||
*/
|
||||
struct iwl_bt_coex_sw_boost_update_cmd {
|
||||
struct iwl_bt_coex_sw_boost boost_values[3];
|
||||
} __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_bt_coex_reduced_txp_update_cmd
|
||||
* @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
|
||||
|
|
|
@@ -212,6 +212,10 @@ enum {
    REPLY_RX_MPDU_CMD = 0xc1,
    BA_NOTIF = 0xc5,

    /* Location Aware Regulatory */
    MCC_UPDATE_CMD = 0xc8,
    MCC_CHUB_UPDATE_CMD = 0xc9,

    MARKER_CMD = 0xcb,

    /* BT Coex */
@@ -362,7 +366,8 @@ enum {
    NVM_SECTION_TYPE_CALIBRATION = 4,
    NVM_SECTION_TYPE_PRODUCTION = 5,
    NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
    NVM_MAX_NUM_SECTIONS = 12,
    NVM_SECTION_TYPE_PHY_SKU = 12,
    NVM_MAX_NUM_SECTIONS = 13,
};

/**
@@ -1442,7 +1447,19 @@ enum iwl_sf_scenario {
#define SF_W_MARK_LEGACY 4096
#define SF_W_MARK_SCAN 4096

/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */
#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */

/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
@@ -1473,6 +1490,92 @@ struct iwl_sf_cfg_cmd {
    __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
} __packed; /* SF_CFG_API_S_VER_2 */

/***********************************
 * Location Aware Regulatory (LAR) API - MCC updates
 ***********************************/

/**
 * struct iwl_mcc_update_cmd - Request the device to update geographic
 * regulatory profile according to the given MCC (Mobile Country Code).
 * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
 * MCC in the cmd response will be the relevant MCC in the NVM.
 * @mcc: given mobile country code
 * @source_id: the source from where we got the MCC, see iwl_mcc_source
 * @reserved: reserved for alignment
 */
struct iwl_mcc_update_cmd {
    __le16 mcc;
    u8 source_id;
    u8 reserved;
} __packed; /* LAR_UPDATE_MCC_CMD_API_S */

/**
 * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
 * Contains the new channel control profile map, if changed, and the new MCC
 * (mobile country code).
 * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
 * @status: see &enum iwl_mcc_update_status
 * @mcc: the new applied MCC
 * @cap: capabilities for all channels which matches the MCC
 * @source_id: the MCC source, see iwl_mcc_source
 * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
 *    channels, depending on platform)
 * @channels: channel control data map, DWORD for each channel. Only the first
 *    16bits are used.
 */
struct iwl_mcc_update_resp {
    __le32 status;
    __le16 mcc;
    u8 cap;
    u8 source_id;
    __le32 n_channels;
    __le32 channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */

/**
 * struct iwl_mcc_chub_notif - chub notifies of mcc change
 * (MCC_CHUB_UPDATE_CMD = 0xc9)
 * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
 * the cellular and connectivity cores that gets updates of the mcc, and
 * notifies the ucode directly of any mcc change.
 * The ucode requests the driver to request the device to update geographic
 * regulatory profile according to the given MCC (Mobile Country Code).
 * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
 * MCC in the cmd response will be the relevant MCC in the NVM.
 * @mcc: given mobile country code
 * @source_id: identity of the change originator, see iwl_mcc_source
 * @reserved1: reserved for alignment
 */
struct iwl_mcc_chub_notif {
    u16 mcc;
    u8 source_id;
    u8 reserved1;
} __packed; /* LAR_MCC_NOTIFY_S */

enum iwl_mcc_update_status {
    MCC_RESP_NEW_CHAN_PROFILE,
    MCC_RESP_SAME_CHAN_PROFILE,
    MCC_RESP_INVALID,
    MCC_RESP_NVM_DISABLED,
    MCC_RESP_ILLEGAL,
    MCC_RESP_LOW_PRIORITY,
};

enum iwl_mcc_source {
    MCC_SOURCE_OLD_FW = 0,
    MCC_SOURCE_ME = 1,
    MCC_SOURCE_BIOS = 2,
    MCC_SOURCE_3G_LTE_HOST = 3,
    MCC_SOURCE_3G_LTE_DEVICE = 4,
    MCC_SOURCE_WIFI = 5,
    MCC_SOURCE_RESERVED = 6,
    MCC_SOURCE_DEFAULT = 7,
    MCC_SOURCE_UNINITIALIZED = 8,
    MCC_SOURCE_GET_CURRENT = 0x10
};

/* DTS measurements */

enum iwl_dts_measurement_flags {
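As documented above, MCC_UPDATE_CMD carries a two-letter country code plus a source id, and the response returns up to 51 per-channel control words. The sketch below packs such a command payload on the host side; the alpha2-to-16-bit packing is an assumption chosen for illustration, not quoted from the driver.

/* Sketch: packing an MCC_UPDATE_CMD payload. The byte order of the alpha2
 * packing is an assumption for illustration only. */
#include <stdint.h>
#include <stdio.h>

enum mcc_source { MCC_SOURCE_WIFI = 5, MCC_SOURCE_GET_CURRENT = 0x10 };

struct mcc_update_cmd {
    uint16_t mcc;       /* __le16 on the wire */
    uint8_t source_id;
    uint8_t reserved;
} __attribute__((packed));

static struct mcc_update_cmd build_mcc_cmd(const char *alpha2, uint8_t src)
{
    struct mcc_update_cmd cmd = {
        /* 'U','S' -> 0x5553; "ZZ" asks for the NVM default profile */
        .mcc = (uint16_t)((alpha2[0] << 8) | alpha2[1]),
        .source_id = src,
    };
    return cmd;
}

int main(void)
{
    struct mcc_update_cmd cmd = build_mcc_cmd("US", MCC_SOURCE_WIFI);

    printf("mcc=0x%04x source=%u size=%zu\n",
           cmd.mcc, cmd.source_id, sizeof(cmd));
    return 0;
}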
@@ -739,6 +739,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
    if (ret)
        goto error;

    /*
     * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
     * anyway, so don't init MCC.
     */
    if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
        ret = iwl_mvm_init_mcc(mvm);
        if (ret)
            goto error;
    }

    if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
        ret = iwl_mvm_config_scan(mvm);
        if (ret)
@ -86,6 +86,7 @@
|
|||
#include "iwl-fw-error-dump.h"
|
||||
#include "iwl-prph.h"
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-nvm-parse.h"
|
||||
|
||||
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
|
||||
{
|
||||
|
@ -301,6 +302,109 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
}
}

struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
const char *alpha2,
enum iwl_mcc_source src_id,
bool *changed)
{
struct ieee80211_regdomain *regd = NULL;
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mcc_update_resp *resp;

IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);

lockdep_assert_held(&mvm->mutex);

resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
PTR_RET(resp));
goto out;
}

if (changed)
*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);

regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
PTR_RET(regd));
goto out;
}

IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;

out:
return regd;
}

void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
{
bool changed;
struct ieee80211_regdomain *regd;

if (!iwl_mvm_is_lar_supported(mvm))
return;

regd = iwl_mvm_get_current_regdomain(mvm, &changed);
if (!IS_ERR_OR_NULL(regd)) {
/* only update the regulatory core if changed */
if (changed)
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);

kfree(regd);
}
}

struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
bool *changed)
{
return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
iwl_mvm_is_wifi_mcc_supported(mvm) ?
MCC_SOURCE_GET_CURRENT :
MCC_SOURCE_OLD_FW, changed);
}

int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
{
enum iwl_mcc_source used_src;
struct ieee80211_regdomain *regd;
const struct ieee80211_regdomain *r =
rtnl_dereference(mvm->hw->wiphy->regd);

if (!r)
return 0;

/* save the last source in case we overwrite it below */
used_src = mvm->mcc_src;
if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
/* Notify the firmware we support wifi location updates */
regd = iwl_mvm_get_current_regdomain(mvm, NULL);
if (!IS_ERR_OR_NULL(regd))
kfree(regd);
}

/* Now set our last stored MCC and source */
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, NULL);
if (IS_ERR_OR_NULL(regd))
return -EIO;

regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
kfree(regd);

return 0;
}

int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@ -356,8 +460,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_ADHOC);

hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
if (iwl_mvm_is_lar_supported(mvm))
hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
else
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;

if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@ -1193,7 +1301,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)

clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
iwl_mvm_d0i3_enable_tx(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
@ -1872,7 +1980,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
sizeof(mvmvif->beacon_stats));

/* add quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, true, NULL);
if (ret) {
IWL_ERR(mvm, "failed to update quotas\n");
return;
@ -1924,7 +2032,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");

@ -2043,7 +2151,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* power updated needs to be done before quotas */
iwl_mvm_power_update_mac(mvm);

ret = iwl_mvm_update_quotas(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
goto out_quota_failed;

@ -2109,7 +2217,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);

iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_update_quotas(mvm, false, NULL);
iwl_mvm_send_rm_bcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);

@ -2248,6 +2356,12 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,

mutex_lock(&mvm->mutex);

if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
ret = -EBUSY;
goto out;
}

if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
ret = -EBUSY;
goto out;
@ -2328,25 +2442,35 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
unsigned long txqs = 0, tids = 0;
int tid;

spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

if (tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;

__set_bit(tid_data->txq_id, &txqs);

if (iwl_mvm_tid_queued(tid_data) == 0)
continue;

__set_bit(tid, &tids);
}

switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data;

tid_data = &mvmsta->tid_data[tid];
if (tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
if (iwl_mvm_tid_queued(tid_data) == 0)
continue;
for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
ieee80211_sta_set_buffered(sta, tid, true);
}
spin_unlock_bh(&mvmsta->lock);

if (txqs)
iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@ -2356,11 +2480,15 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
case STA_NOTIFY_AWAKE:
if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
break;

if (txqs)
iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
iwl_mvm_sta_modify_ps_wake(mvm, sta);
break;
default:
break;
}
spin_unlock_bh(&mvmsta->lock);
}

static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
@ -2598,6 +2726,12 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,

mutex_lock(&mvm->mutex);

if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
ret = -EBUSY;
goto out;
}

if (!vif->bss_conf.idle) {
ret = -EBUSY;
goto out;
@ -3159,14 +3293,14 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
*/
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvmvif->monitor_active = true;
ret = iwl_mvm_update_quotas(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
goto out_remove_binding;
}

/* Handle binding during CSA */
if (vif->type == NL80211_IFTYPE_AP) {
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_update_quotas(mvm, false, NULL);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}

@ -3190,7 +3324,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,

iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);

iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_update_quotas(mvm, false, NULL);
}

goto out;
@ -3263,7 +3397,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
break;
}

iwl_mvm_update_quotas(mvm, disabled_vif);
iwl_mvm_update_quotas(mvm, false, disabled_vif);
iwl_mvm_binding_remove_vif(mvm, vif);

out:
@ -3455,7 +3589,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
mvm->noa_duration = noa_duration;
mvm->noa_vif = vif;

return iwl_mvm_update_quotas(mvm, NULL);
return iwl_mvm_update_quotas(mvm, false, NULL);
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@ -810,6 +810,9 @@ struct iwl_mvm {
/* system time of last beacon (for AP/GO interface) */
u32 ap_last_beacon_gp2;

bool lar_regdom_set;
enum iwl_mcc_source mcc_src;

u8 low_latency_agg_frame_limit;

/* TDLS channel switch data */
@ -910,6 +913,30 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}

static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{
bool nvm_lar = mvm->nvm_data->lar_enabled;
bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_LAR_SUPPORT;

if (iwlwifi_mod_params.lar_disable)
return false;

/*
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
*/
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
return nvm_lar && tlv_lar;
else
return tlv_lar;
}

static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
{
return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE;
}

static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
@ -921,6 +948,12 @@ static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
IWL_MVM_BT_COEX_CORUNNING;
}

static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
{
return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
IWL_MVM_BT_COEX_RRC;
}

extern const u8 iwl_mvm_ac_to_tx_fifo[];

struct iwl_rate_info {
@ -1106,7 +1139,7 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);

/* Quota management */
int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
struct ieee80211_vif *disabled_vif);

/* Scanning */
@ -1282,17 +1315,6 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);

enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
BT_KILL_MSK_NEVER,
BT_KILL_MSK_ALWAYS,
BT_KILL_MSK_MAX,
};

extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX];

/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void
@ -1389,6 +1411,23 @@ void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
int iwl_mvm_get_temp(struct iwl_mvm *mvm);

/* Location Aware Regulatory */
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
enum iwl_mcc_source src_id);
int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
const char *alpha2,
enum iwl_mcc_source src_id,
bool *changed);
struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
bool *changed);
int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);

/* smart fifo */
int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool added_vif);
@ -63,12 +63,16 @@
*
*****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "iwl-eeprom-parse.h"
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"

/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
@ -262,7 +266,9 @@ static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
bool is_family_8000_a_step = false, lar_enabled;
u32 mac_addr0, mac_addr1;

/* Checking for required sections */
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@ -286,22 +292,43 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
"Can't parse mac_address, empty sections\n");
return NULL;
}

if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
is_family_8000_a_step = true;

/* PHY_SKU section is mandatory in B0 */
if (!is_family_8000_a_step &&
!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
IWL_ERR(mvm,
"Can't parse phy_sku in B0, empty sections\n");
return NULL;
}
}

if (WARN_ON(!mvm->cfg))
return NULL;

/* read the mac address from WFMP registers */
mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);

hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
mac_override =
(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;

lar_enabled = !iwlwifi_mod_params.lar_disable &&
(mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);

return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
regulatory, mac_override,
mvm->fw->valid_tx_ant,
mvm->fw->valid_rx_ant);
regulatory, mac_override, phy_sku,
mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
lar_enabled, is_family_8000_a_step,
mac_addr0, mac_addr1);
}

#define MAX_NVM_FILE_LEN 16384
@ -570,3 +597,258 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct iwl_mcc_update_resp *
|
||||
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
|
||||
enum iwl_mcc_source src_id)
|
||||
{
|
||||
struct iwl_mcc_update_cmd mcc_update_cmd = {
|
||||
.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
|
||||
.source_id = (u8)src_id,
|
||||
};
|
||||
struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
|
||||
struct iwl_rx_packet *pkt;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = MCC_UPDATE_CMD,
|
||||
.flags = CMD_WANT_SKB,
|
||||
.data = { &mcc_update_cmd },
|
||||
};
|
||||
|
||||
int ret;
|
||||
u32 status;
|
||||
int resp_len, n_channels;
|
||||
u16 mcc;
|
||||
|
||||
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
|
||||
|
||||
IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
|
||||
alpha2[0], alpha2[1], src_id);
|
||||
|
||||
ret = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
pkt = cmd.resp_pkt;
|
||||
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
|
||||
IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
|
||||
pkt->hdr.flags);
|
||||
ret = -EIO;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Extract MCC response */
|
||||
mcc_resp = (void *)pkt->data;
|
||||
status = le32_to_cpu(mcc_resp->status);
|
||||
|
||||
mcc = le16_to_cpu(mcc_resp->mcc);
|
||||
|
||||
/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
|
||||
if (mcc == 0) {
|
||||
mcc = 0x3030; /* "00" - world */
|
||||
mcc_resp->mcc = cpu_to_le16(mcc);
|
||||
}
|
||||
|
||||
n_channels = __le32_to_cpu(mcc_resp->n_channels);
|
||||
IWL_DEBUG_LAR(mvm,
|
||||
"MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
|
||||
status, mcc, mcc >> 8, mcc & 0xff,
|
||||
!!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
|
||||
|
||||
resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
|
||||
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
|
||||
if (!resp_cp) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
exit:
|
||||
iwl_free_resp(&cmd);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
return resp_cp;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
#define WRD_METHOD "WRDD"
|
||||
#define WRDD_WIFI (0x07)
|
||||
#define WRDD_WIGIG (0x10)
|
||||
|
||||
static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
|
||||
{
|
||||
union acpi_object *mcc_pkg, *domain_type, *mcc_value;
|
||||
u32 i;
|
||||
|
||||
if (wrdd->type != ACPI_TYPE_PACKAGE ||
|
||||
wrdd->package.count < 2 ||
|
||||
wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
|
||||
wrdd->package.elements[0].integer.value != 0) {
|
||||
IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 1 ; i < wrdd->package.count ; ++i) {
|
||||
mcc_pkg = &wrdd->package.elements[i];
|
||||
|
||||
if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
|
||||
mcc_pkg->package.count < 2 ||
|
||||
mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
|
||||
mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
|
||||
mcc_pkg = NULL;
|
||||
continue;
|
||||
}
|
||||
|
||||
domain_type = &mcc_pkg->package.elements[0];
|
||||
if (domain_type->integer.value == WRDD_WIFI)
|
||||
break;
|
||||
|
||||
mcc_pkg = NULL;
|
||||
}
|
||||
|
||||
if (mcc_pkg) {
|
||||
mcc_value = &mcc_pkg->package.elements[1];
|
||||
return mcc_value->integer.value;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
|
||||
{
|
||||
acpi_handle root_handle;
|
||||
acpi_handle handle;
|
||||
struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
|
||||
acpi_status status;
|
||||
u32 mcc_val;
|
||||
struct pci_dev *pdev = to_pci_dev(mvm->dev);
|
||||
|
||||
root_handle = ACPI_HANDLE(&pdev->dev);
|
||||
if (!root_handle) {
|
||||
IWL_DEBUG_LAR(mvm,
|
||||
"Could not retrieve root port ACPI handle\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Get the method's handle */
|
||||
status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
IWL_DEBUG_LAR(mvm, "WRD method not found\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Call WRDD with no arguments */
|
||||
status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
|
||||
kfree(wrdd.pointer);
|
||||
if (!mcc_val)
|
||||
return -ENOENT;
|
||||
|
||||
mcc[0] = (mcc_val >> 8) & 0xff;
|
||||
mcc[1] = mcc_val & 0xff;
|
||||
mcc[2] = '\0';
|
||||
return 0;
|
||||
}
|
||||
#else /* CONFIG_ACPI */
|
||||
static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
|
||||
{
|
||||
return -ENOENT;
|
||||
}
|
||||
#endif
|
||||
|
||||
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
|
||||
{
|
||||
bool tlv_lar;
|
||||
bool nvm_lar;
|
||||
int retval;
|
||||
struct ieee80211_regdomain *regd;
|
||||
char mcc[3];
|
||||
|
||||
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
|
||||
tlv_lar = mvm->fw->ucode_capa.capa[0] &
|
||||
IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
|
||||
nvm_lar = mvm->nvm_data->lar_enabled;
|
||||
if (tlv_lar != nvm_lar)
|
||||
IWL_INFO(mvm,
|
||||
"Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
|
||||
tlv_lar ? "enabled" : "disabled",
|
||||
nvm_lar ? "enabled" : "disabled");
|
||||
}
|
||||
|
||||
if (!iwl_mvm_is_lar_supported(mvm))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* During HW restart, only replay the last set MCC to FW. Otherwise,
|
||||
* queue an update to cfg80211 to retrieve the default alpha2 from FW.
|
||||
*/
|
||||
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
|
||||
/* This should only be called during vif up and hold RTNL */
|
||||
return iwl_mvm_init_fw_regd(mvm);
|
||||
}
|
||||
|
||||
/*
|
||||
* Driver regulatory hint for initial update, this also informs the
|
||||
* firmware we support wifi location updates.
|
||||
* Disallow scans that might crash the FW while the LAR regdomain
|
||||
* is not set.
|
||||
*/
|
||||
mvm->lar_regdom_set = false;
|
||||
|
||||
regd = iwl_mvm_get_current_regdomain(mvm, NULL);
|
||||
if (IS_ERR_OR_NULL(regd))
|
||||
return -EIO;
|
||||
|
||||
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
|
||||
!iwl_mvm_get_bios_mcc(mvm, mcc)) {
|
||||
kfree(regd);
|
||||
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
|
||||
MCC_SOURCE_BIOS, NULL);
|
||||
if (IS_ERR_OR_NULL(regd))
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
|
||||
kfree(regd);
|
||||
return retval;
|
||||
}
|
||||
|
||||
int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb,
|
||||
struct iwl_device_cmd *cmd)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
|
||||
enum iwl_mcc_source src;
|
||||
char mcc[3];
|
||||
struct ieee80211_regdomain *regd;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
|
||||
return 0;
|
||||
|
||||
mcc[0] = notif->mcc >> 8;
|
||||
mcc[1] = notif->mcc & 0xff;
|
||||
mcc[2] = '\0';
|
||||
src = notif->source_id;
|
||||
|
||||
IWL_DEBUG_LAR(mvm,
|
||||
"RX: received chub update mcc cmd (mcc '%s' src %d)\n",
|
||||
mcc, src);
|
||||
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
|
||||
if (IS_ERR_OR_NULL(regd))
|
||||
return 0;
|
||||
|
||||
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
|
||||
kfree(regd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -82,7 +82,6 @@
|
|||
#include "rs.h"
|
||||
#include "fw-api-scan.h"
|
||||
#include "time-event.h"
|
||||
#include "iwl-fw-error-dump.h"
|
||||
|
||||
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
|
||||
MODULE_DESCRIPTION(DRV_DESCRIPTION);
|
||||
|
@ -234,6 +233,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
|
|||
iwl_mvm_rx_ant_coupling_notif, true),
|
||||
|
||||
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
|
||||
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
|
||||
|
||||
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
|
||||
|
||||
|
@ -358,6 +358,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
|
|||
CMD(TDLS_CHANNEL_SWITCH_CMD),
|
||||
CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
|
||||
CMD(TDLS_CONFIG_CMD),
|
||||
CMD(MCC_UPDATE_CMD),
|
||||
};
|
||||
#undef CMD
|
||||
|
||||
|
@ -871,8 +872,8 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
|
|||
|
||||
/* start recording again if the firmware is not crashed */
|
||||
WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
|
||||
mvm->fw->dbg_dest_tlv &&
|
||||
iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
|
||||
mvm->fw->dbg_dest_tlv &&
|
||||
iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
|
@ -1270,6 +1271,10 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
|
|||
iwl_free_resp(&get_status_cmd);
|
||||
out:
|
||||
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
|
||||
|
||||
/* the FW might have updated the regdomain */
|
||||
iwl_mvm_update_changed_regdom(mvm);
|
||||
|
||||
iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
|
|
|
@ -358,7 +358,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
|
|||
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
|
||||
|
||||
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
|
||||
!mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif))
|
||||
!mvmvif->pm_enabled)
|
||||
return;
|
||||
|
||||
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
|
||||
|
@ -639,6 +639,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
|
|||
if (vifs->ap_vif)
|
||||
ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
|
||||
|
||||
/* don't allow PM if any TDLS stations exist */
|
||||
if (iwl_mvm_tdls_sta_count(mvm, NULL))
|
||||
return;
|
||||
|
||||
/* enable PM on bss if bss stand alone */
|
||||
if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
|
||||
bss_mvmvif->pm_enabled = true;
|
||||
|
|
|
@ -172,6 +172,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
|
|||
}
|
||||
|
||||
int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
|
||||
bool force_update,
|
||||
struct ieee80211_vif *disabled_vif)
|
||||
{
|
||||
struct iwl_time_quota_cmd cmd = {};
|
||||
|
@ -309,7 +310,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
|
|||
"zero quota on binding %d\n", i);
|
||||
}
|
||||
|
||||
if (!send) {
|
||||
if (!send && !force_update) {
|
||||
/* don't send a practically unchanged command, the firmware has
|
||||
* to re-initialize a lot of state and that can have an adverse
|
||||
* impact on it
|
||||
|
|
|
@ -1065,6 +1065,37 @@ static inline bool rs_rate_column_match(struct rs_rate *a,
|
|||
&& ant_match;
|
||||
}
|
||||
|
||||
static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
|
||||
{
|
||||
if (is_legacy(rate)) {
|
||||
if (rate->ant == ANT_A)
|
||||
return RS_COLUMN_LEGACY_ANT_A;
|
||||
|
||||
if (rate->ant == ANT_B)
|
||||
return RS_COLUMN_LEGACY_ANT_B;
|
||||
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (is_siso(rate)) {
|
||||
if (rate->ant == ANT_A || rate->stbc || rate->bfer)
|
||||
return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
|
||||
RS_COLUMN_SISO_ANT_A;
|
||||
|
||||
if (rate->ant == ANT_B)
|
||||
return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
|
||||
RS_COLUMN_SISO_ANT_B;
|
||||
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (is_mimo(rate))
|
||||
return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
|
||||
|
||||
err:
|
||||
return RS_COLUMN_INVALID;
|
||||
}
|
||||
|
||||
static u8 rs_get_tid(struct ieee80211_hdr *hdr)
|
||||
{
|
||||
u8 tid = IWL_MAX_TID_COUNT;
|
||||
|
@ -1106,18 +1137,44 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|||
return;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
/* Disable last tx check if we are debugging with fixed rate */
|
||||
if (lq_sta->pers.dbg_fixed_rate) {
|
||||
IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
/* This packet was aggregated but doesn't carry status info */
|
||||
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
|
||||
!(info->flags & IEEE80211_TX_STAT_AMPDU))
|
||||
return;
|
||||
|
||||
rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
|
||||
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
/* Disable last tx check if we are debugging with fixed rate but
|
||||
* update tx stats */
|
||||
if (lq_sta->pers.dbg_fixed_rate) {
|
||||
int index = tx_resp_rate.index;
|
||||
enum rs_column column;
|
||||
int attempts, success;
|
||||
|
||||
column = rs_get_column_from_rate(&tx_resp_rate);
|
||||
if (WARN_ONCE(column == RS_COLUMN_INVALID,
|
||||
"Can't map rate 0x%x to column",
|
||||
tx_resp_hwrate))
|
||||
return;
|
||||
|
||||
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
|
||||
attempts = info->status.ampdu_len;
|
||||
success = info->status.ampdu_ack_len;
|
||||
} else {
|
||||
attempts = info->status.rates[0].count;
|
||||
success = !!(info->flags & IEEE80211_TX_STAT_ACK);
|
||||
}
|
||||
|
||||
lq_sta->pers.tx_stats[column][index].total += attempts;
|
||||
lq_sta->pers.tx_stats[column][index].success += success;
|
||||
|
||||
IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
|
||||
tx_resp_hwrate, success, attempts);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (time_after(jiffies,
|
||||
(unsigned long)(lq_sta->last_tx +
|
||||
(IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
|
||||
|
@ -1142,7 +1199,6 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|||
table = &lq_sta->lq;
|
||||
lq_hwrate = le32_to_cpu(table->rs_table[0]);
|
||||
rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
|
||||
rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
|
||||
|
||||
/* Here we actually compare this rate to the latest LQ command */
|
||||
if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
|
||||
|
@ -3343,16 +3399,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
|
|||
(is_legacy(rate)) ? "legacy" :
|
||||
is_vht(rate) ? "VHT" : "HT");
|
||||
if (!is_legacy(rate)) {
|
||||
desc += sprintf(buff+desc, " %s",
|
||||
desc += sprintf(buff + desc, " %s",
|
||||
(is_siso(rate)) ? "SISO" : "MIMO2");
|
||||
desc += sprintf(buff+desc, " %s",
|
||||
(is_ht20(rate)) ? "20MHz" :
|
||||
(is_ht40(rate)) ? "40MHz" :
|
||||
(is_ht80(rate)) ? "80Mhz" : "BAD BW");
|
||||
desc += sprintf(buff+desc, " %s %s %s\n",
|
||||
(rate->sgi) ? "SGI" : "NGI",
|
||||
(rate->ldpc) ? "LDPC" : "BCC",
|
||||
(lq_sta->is_agg) ? "AGG on" : "");
|
||||
desc += sprintf(buff + desc, " %s",
|
||||
(is_ht20(rate)) ? "20MHz" :
|
||||
(is_ht40(rate)) ? "40MHz" :
|
||||
(is_ht80(rate)) ? "80Mhz" : "BAD BW");
|
||||
desc += sprintf(buff + desc, " %s %s %s\n",
|
||||
(rate->sgi) ? "SGI" : "NGI",
|
||||
(rate->ldpc) ? "LDPC" : "BCC",
|
||||
(lq_sta->is_agg) ? "AGG on" : "");
|
||||
}
|
||||
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
|
||||
lq_sta->last_rate_n_flags);
|
||||
|
@ -3373,13 +3429,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
|
|||
ss_params = le32_to_cpu(lq_sta->lq.ss_params);
|
||||
desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
|
||||
(ss_params & LQ_SS_PARAMS_VALID) ?
|
||||
"VALID," : "INVALID",
|
||||
"VALID" : "INVALID",
|
||||
(ss_params & LQ_SS_BFER_ALLOWED) ?
|
||||
"BFER," : "",
|
||||
", BFER" : "",
|
||||
(ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
|
||||
"STBC," : "",
|
||||
", STBC" : "",
|
||||
(ss_params & LQ_SS_FORCE) ?
|
||||
"FORCE" : "");
|
||||
", FORCE" : "");
|
||||
desc += sprintf(buff+desc,
|
||||
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
|
||||
lq_sta->lq.initial_rate_index[0],
|
||||
|
|
|
@ -99,7 +99,35 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
|
|||
|
||||
/*
|
||||
* Aging and idle timeouts for the different possible scenarios
|
||||
* in SF_FULL_ON state.
|
||||
* in default configuration
|
||||
*/
|
||||
static const
|
||||
__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
|
||||
{
|
||||
cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
|
||||
cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
|
||||
},
|
||||
{
|
||||
cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
|
||||
cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
|
||||
},
|
||||
{
|
||||
cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
|
||||
cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
|
||||
},
|
||||
{
|
||||
cpu_to_le32(SF_BA_AGING_TIMER_DEF),
|
||||
cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
|
||||
},
|
||||
{
|
||||
cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
|
||||
cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* Aging and idle timeouts for the different possible scenarios
|
||||
* in single BSS MAC configuration.
|
||||
*/
|
||||
static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
|
||||
{
|
||||
|
@ -124,7 +152,8 @@ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
|
|||
},
|
||||
};
|
||||
|
||||
static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
|
||||
static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
|
||||
struct iwl_sf_cfg_cmd *sf_cmd,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
int i, j, watermark;
|
||||
|
@ -163,24 +192,38 @@ static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
|
|||
cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
|
||||
}
|
||||
}
|
||||
BUILD_BUG_ON(sizeof(sf_full_timeout) !=
|
||||
sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
|
||||
|
||||
memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
|
||||
sizeof(sf_full_timeout));
|
||||
if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
|
||||
BUILD_BUG_ON(sizeof(sf_full_timeout) !=
|
||||
sizeof(__le32) * SF_NUM_SCENARIO *
|
||||
SF_NUM_TIMEOUT_TYPES);
|
||||
|
||||
memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
|
||||
sizeof(sf_full_timeout));
|
||||
} else {
|
||||
BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
|
||||
sizeof(__le32) * SF_NUM_SCENARIO *
|
||||
SF_NUM_TIMEOUT_TYPES);
|
||||
|
||||
memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
|
||||
sizeof(sf_full_timeout_def));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
|
||||
enum iwl_sf_state new_state)
|
||||
{
|
||||
struct iwl_sf_cfg_cmd sf_cmd = {
|
||||
.state = cpu_to_le32(new_state),
|
||||
.state = cpu_to_le32(SF_FULL_ON),
|
||||
};
|
||||
struct ieee80211_sta *sta;
|
||||
int ret = 0;
|
||||
|
||||
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF &&
|
||||
mvm->cfg->disable_dummy_notification)
|
||||
if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
|
||||
sf_cmd.state = cpu_to_le32(new_state);
|
||||
|
||||
if (mvm->cfg->disable_dummy_notification)
|
||||
sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
|
||||
|
||||
/*
|
||||
|
@ -192,6 +235,8 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
|
|||
|
||||
switch (new_state) {
|
||||
case SF_UNINIT:
|
||||
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
|
||||
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
|
||||
break;
|
||||
case SF_FULL_ON:
|
||||
if (sta_id == IWL_MVM_STATION_COUNT) {
|
||||
|
@ -206,11 +251,11 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
|
|||
rcu_read_unlock();
|
||||
return -EINVAL;
|
||||
}
|
||||
iwl_mvm_fill_sf_command(&sf_cmd, sta);
|
||||
iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
case SF_INIT_OFF:
|
||||
iwl_mvm_fill_sf_command(&sf_cmd, NULL);
|
||||
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
|
||||
break;
|
||||
default:
|
||||
WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
|
||||
|
|
|
@ -273,7 +273,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
|
|||
else
|
||||
sta_id = mvm_sta->sta_id;
|
||||
|
||||
if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
|
||||
if (sta_id == IWL_MVM_STATION_COUNT)
|
||||
return -ENOSPC;
|
||||
|
||||
spin_lock_init(&mvm_sta->lock);
|
||||
|
@ -1681,9 +1681,6 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
|
|||
};
|
||||
int ret;
|
||||
|
||||
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
|
||||
return;
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
|
||||
|
|
|
@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
|
|||
struct iwl_time_event_notif *notif)
|
||||
{
|
||||
if (!le32_to_cpu(notif->status)) {
|
||||
if (te_data->vif->type == NL80211_IFTYPE_STATION)
|
||||
ieee80211_connection_loss(te_data->vif);
|
||||
IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
|
||||
iwl_mvm_te_clear_data(mvm, te_data);
|
||||
return;
|
||||
|
@ -261,17 +263,23 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
|
|||
"TE ended - current time %lu, estimated end %lu\n",
|
||||
jiffies, te_data->end_jiffies);
|
||||
|
||||
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
|
||||
switch (te_data->vif->type) {
|
||||
case NL80211_IFTYPE_P2P_DEVICE:
|
||||
ieee80211_remain_on_channel_expired(mvm->hw);
|
||||
iwl_mvm_roc_finished(mvm);
|
||||
break;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
/*
|
||||
* By now, we should have finished association
|
||||
* and know the dtim period.
|
||||
*/
|
||||
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
|
||||
"No association and the time event is over already...");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* By now, we should have finished association
|
||||
* and know the dtim period.
|
||||
*/
|
||||
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
|
||||
"No association and the time event is over already...");
|
||||
iwl_mvm_te_clear_data(mvm, te_data);
|
||||
} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
|
||||
te_data->running = true;
|
||||
|
|
|
@ -857,7 +857,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
mvmvif->low_latency = value;
|
||||
|
||||
res = iwl_mvm_update_quotas(mvm, NULL);
|
||||
res = iwl_mvm_update_quotas(mvm, false, NULL);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
|
|
|
@ -413,10 +413,35 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
|||
|
||||
/* 8000 Series */
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
|
||||
#endif /* CONFIG_IWLMVM */
|
||||
|
||||
{0}
|
||||
|
|
|
@ -217,6 +217,8 @@ struct iwl_pcie_txq_scratch_buf {
|
|||
* @active: stores if queue is active
|
||||
* @ampdu: true if this queue is an ampdu queue for an specific RA/TID
|
||||
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
|
||||
* @frozen: tx stuck queue timer is frozen
|
||||
* @frozen_expiry_remainder: remember how long until the timer fires
|
||||
*
|
||||
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
|
||||
* descriptors) and required locking structures.
|
||||
|
@ -228,9 +230,11 @@ struct iwl_txq {
|
|||
dma_addr_t scratchbufs_dma;
|
||||
struct iwl_pcie_txq_entry *entries;
|
||||
spinlock_t lock;
|
||||
unsigned long frozen_expiry_remainder;
|
||||
struct timer_list stuck_timer;
|
||||
struct iwl_trans_pcie *trans_pcie;
|
||||
bool need_update;
|
||||
bool frozen;
|
||||
u8 active;
|
||||
bool ampdu;
|
||||
unsigned long wd_timeout;
|
||||
|
|
|
@ -682,6 +682,43 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Driver Takes the ownership on secure machine before FW load
|
||||
* and prevent race with the BT load.
|
||||
* W/A for ROM bug. (should be remove in the next Si step)
|
||||
*/
|
||||
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
|
||||
{
|
||||
u32 val, loop = 1000;
|
||||
|
||||
/* Check the RSA semaphore is accessible - if not, we are in trouble */
|
||||
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
|
||||
if (val & (BIT(1) | BIT(17))) {
|
||||
IWL_ERR(trans,
|
||||
"can't access the RSA semaphore it is write protected\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* take ownership on the AUX IF */
|
||||
iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
|
||||
iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
|
||||
|
||||
do {
|
||||
iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
|
||||
val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
|
||||
if (val == 0x1) {
|
||||
iwl_write_prph(trans, RSA_ENABLE, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
udelay(10);
|
||||
loop--;
|
||||
} while (loop > 0);
|
||||
|
||||
IWL_ERR(trans, "Failed to take ownership on secure machine\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
|
||||
const struct fw_img *image,
|
||||
int cpu,
|
||||
|
@ -901,6 +938,11 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
|
|||
if (trans->dbg_dest_tlv)
|
||||
iwl_pcie_apply_destination(trans);
|
||||
|
||||
/* TODO: remove in the next Si step */
|
||||
ret = iwl_pcie_rsa_race_bug_wa(trans);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* configure the ucode to be ready to get the secured image */
|
||||
/* release CPU reset */
|
||||
iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
|
||||
|
@ -1462,6 +1504,60 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
|
||||
unsigned long txqs,
|
||||
bool freeze)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int queue;
|
||||
|
||||
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
|
||||
struct iwl_txq *txq = &trans_pcie->txq[queue];
|
||||
unsigned long now;
|
||||
|
||||
spin_lock_bh(&txq->lock);
|
||||
|
||||
now = jiffies;
|
||||
|
||||
if (txq->frozen == freeze)
|
||||
goto next_queue;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
|
||||
freeze ? "Freezing" : "Waking", queue);
|
||||
|
||||
txq->frozen = freeze;
|
||||
|
||||
if (txq->q.read_ptr == txq->q.write_ptr)
|
||||
goto next_queue;
|
||||
|
||||
if (freeze) {
|
||||
if (unlikely(time_after(now,
|
||||
txq->stuck_timer.expires))) {
|
||||
/*
|
||||
* The timer should have fired, maybe it is
|
||||
* spinning right now on the lock.
|
||||
*/
|
||||
goto next_queue;
|
||||
}
|
||||
/* remember how long until the timer fires */
|
||||
txq->frozen_expiry_remainder =
|
||||
txq->stuck_timer.expires - now;
|
||||
del_timer(&txq->stuck_timer);
|
||||
goto next_queue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wake a non-empty queue -> arm timer with the
|
||||
* remainder before it froze
|
||||
*/
|
||||
mod_timer(&txq->stuck_timer,
|
||||
now + txq->frozen_expiry_remainder);
|
||||
|
||||
next_queue:
|
||||
spin_unlock_bh(&txq->lock);
|
||||
}
|
||||
}
|
||||
|
||||
#define IWL_FLUSH_WAIT_MS 2000
|
||||
|
||||
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
|
||||
|
@ -1713,7 +1809,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
|
|||
int ret;
|
||||
size_t bufsz;
|
||||
|
||||
bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
|
||||
bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
|
||||
|
||||
if (!trans_pcie->txq)
|
||||
return -EAGAIN;
|
||||
|
@ -1726,11 +1822,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
|
|||
txq = &trans_pcie->txq[cnt];
|
||||
q = &txq->q;
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
|
||||
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
|
||||
cnt, q->read_ptr, q->write_ptr,
|
||||
!!test_bit(cnt, trans_pcie->queue_used),
|
||||
!!test_bit(cnt, trans_pcie->queue_stopped),
|
||||
txq->need_update,
|
||||
txq->need_update, txq->frozen,
|
||||
(cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
|
||||
}
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
|
@ -1961,24 +2057,25 @@ static const struct {
|
|||
{ .start = 0x00a01c7c, .end = 0x00a01c7c },
|
||||
{ .start = 0x00a01c28, .end = 0x00a01c54 },
|
||||
{ .start = 0x00a01c5c, .end = 0x00a01c5c },
|
||||
{ .start = 0x00a01c84, .end = 0x00a01c84 },
|
||||
{ .start = 0x00a01c60, .end = 0x00a01cdc },
|
||||
{ .start = 0x00a01ce0, .end = 0x00a01d0c },
|
||||
{ .start = 0x00a01d18, .end = 0x00a01d20 },
|
||||
{ .start = 0x00a01d2c, .end = 0x00a01d30 },
|
||||
{ .start = 0x00a01d40, .end = 0x00a01d5c },
|
||||
{ .start = 0x00a01d80, .end = 0x00a01d80 },
|
||||
{ .start = 0x00a01d98, .end = 0x00a01d98 },
|
||||
{ .start = 0x00a01d98, .end = 0x00a01d9c },
|
||||
{ .start = 0x00a01da8, .end = 0x00a01da8 },
|
||||
{ .start = 0x00a01db8, .end = 0x00a01df4 },
|
||||
{ .start = 0x00a01dc0, .end = 0x00a01dfc },
|
||||
{ .start = 0x00a01e00, .end = 0x00a01e2c },
|
||||
{ .start = 0x00a01e40, .end = 0x00a01e60 },
|
||||
{ .start = 0x00a01e68, .end = 0x00a01e6c },
|
||||
{ .start = 0x00a01e74, .end = 0x00a01e74 },
|
||||
{ .start = 0x00a01e84, .end = 0x00a01e90 },
|
||||
{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
|
||||
{ .start = 0x00a01ed0, .end = 0x00a01ed0 },
|
||||
{ .start = 0x00a01f00, .end = 0x00a01f14 },
|
||||
{ .start = 0x00a01f44, .end = 0x00a01f58 },
|
||||
{ .start = 0x00a01f80, .end = 0x00a01fa8 },
|
||||
{ .start = 0x00a01fb0, .end = 0x00a01fbc },
|
||||
{ .start = 0x00a01ff8, .end = 0x00a01ffc },
|
||||
{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
|
||||
{ .start = 0x00a01f00, .end = 0x00a01f1c },
|
||||
{ .start = 0x00a01f44, .end = 0x00a01ffc },
|
||||
{ .start = 0x00a02000, .end = 0x00a02048 },
|
||||
{ .start = 0x00a02068, .end = 0x00a020f0 },
|
||||
{ .start = 0x00a02100, .end = 0x00a02118 },
|
||||
|
@ -2305,6 +2402,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
|
|||
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
|
||||
|
||||
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
|
||||
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
|
||||
|
||||
.write8 = iwl_trans_pcie_write8,
|
||||
.write32 = iwl_trans_pcie_write32,
|
||||
|
@ -2423,10 +2521,45 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
|||
* "dash" value). To keep hw_rev backwards compatible - we'll store it
|
||||
* in the old format.
|
||||
*/
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
trans->hw_rev = (trans->hw_rev & 0xfff0) |
|
||||
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
|
||||
|
||||
/*
|
||||
* in-order to recognize C step driver should read chip version
|
||||
* id located at the AUX bus MISC address space.
|
||||
*/
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
udelay(2);
|
||||
|
||||
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
|
||||
25000);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
|
||||
goto out_pci_disable_msi;
|
||||
}
|
||||
|
||||
if (iwl_trans_grab_nic_access(trans, false, &flags)) {
|
||||
u32 hw_step;
|
||||
|
||||
hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
|
||||
hw_step |= ENABLE_WFPM;
|
||||
__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
|
||||
hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
|
||||
hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
|
||||
if (hw_step == 0x3)
|
||||
trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
|
||||
(SILICON_C_STEP << 2);
|
||||
iwl_trans_release_nic_access(trans, &flags);
|
||||
}
|
||||
}
|
||||
|
||||
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
|
||||
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
|
||||
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
|
||||
|
|
|
@ -725,33 +725,50 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
|
|||
iwl_pcie_tx_start(trans, 0);
|
||||
}
|
||||
|
||||
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
unsigned long flags;
|
||||
int ch, ret;
|
||||
u32 mask = 0;
|
||||
|
||||
spin_lock(&trans_pcie->irq_lock);
|
||||
|
||||
if (!iwl_trans_grab_nic_access(trans, false, &flags))
|
||||
goto out;
|
||||
|
||||
/* Stop each Tx DMA channel */
|
||||
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
|
||||
iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
|
||||
mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
|
||||
}
|
||||
|
||||
/* Wait for DMA channels to be idle */
|
||||
ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
|
||||
if (ret < 0)
|
||||
IWL_ERR(trans,
|
||||
"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
|
||||
ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
|
||||
|
||||
iwl_trans_release_nic_access(trans, &flags);
|
||||
|
||||
out:
|
||||
spin_unlock(&trans_pcie->irq_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* iwl_pcie_tx_stop - Stop all Tx DMA channels
|
||||
*/
|
||||
int iwl_pcie_tx_stop(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int ch, txq_id, ret;
|
||||
int txq_id;
|
||||
|
||||
/* Turn off all Tx DMA fifos */
|
||||
spin_lock(&trans_pcie->irq_lock);
|
||||
|
||||
iwl_scd_deactivate_fifos(trans);
|
||||
|
||||
/* Stop each Tx DMA channel, and wait for it to be idle */
|
||||
for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
|
||||
iwl_write_direct32(trans,
|
||||
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
|
||||
ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
|
||||
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
|
||||
if (ret < 0)
|
||||
IWL_ERR(trans,
|
||||
"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
|
||||
ch,
|
||||
iwl_read_direct32(trans,
|
||||
FH_TSSR_TX_STATUS_REG));
|
||||
}
|
||||
spin_unlock(&trans_pcie->irq_lock);
|
||||
/* Turn off all Tx DMA channels */
|
||||
iwl_pcie_tx_stop_fh(trans);
|
||||
|
||||
/*
|
||||
* This function can be called before the op_mode disabled the
|
||||
|
@ -912,9 +929,18 @@ error:
|
|||
|
||||
static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
|
||||
{
|
||||
lockdep_assert_held(&txq->lock);
|
||||
|
||||
if (!txq->wd_timeout)
|
||||
return;
|
||||
|
||||
/*
|
||||
* station is asleep and we send data - that must
|
||||
* be uAPSD or PS-Poll. Don't rearm the timer.
|
||||
*/
|
||||
if (txq->frozen)
|
||||
return;
|
||||
|
||||
/*
|
||||
* if empty delete timer, otherwise move timer forward
|
||||
* since we're making progress on this queue
|
||||
|
@ -1248,6 +1274,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
|
|||
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
|
||||
static const u32 zero_val[4] = {};
|
||||
|
||||
trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
|
||||
trans_pcie->txq[txq_id].frozen = false;
|
||||
|
||||
/*
|
||||
* Upon HW Rfkill - we stop the device, and then stop the queues
|
||||
* in the op_mode. Just for the sake of the simplicity of the op_mode,
|
||||
|
|
|
@ -365,7 +365,6 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
|
|||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(if_usb_reset_device);
|
||||
|
||||
/**
|
||||
* usb_tx_block - transfer data to the device
|
||||
|
@ -907,7 +906,6 @@ restart:
|
|||
lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
|
||||
|
||||
|
||||
#define if_usb_suspend NULL
|
||||
|
|
|
@ -159,6 +159,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
|
|||
int tid;
|
||||
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
|
||||
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
|
||||
struct mwifiex_ra_list_tbl *ra_list;
|
||||
u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
|
||||
|
||||
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
|
||||
|
@ -166,7 +167,13 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
|
|||
|
||||
tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
|
||||
>> BLOCKACKPARAM_TID_POS;
|
||||
ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp->
|
||||
peer_mac_addr);
|
||||
if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
|
||||
if (ra_list) {
|
||||
ra_list->ba_status = BA_SETUP_NONE;
|
||||
ra_list->amsdu_in_ampdu = false;
|
||||
}
|
||||
mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
|
||||
TYPE_DELBA_SENT, true);
|
||||
if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
|
||||
|
@ -185,6 +192,10 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
|
|||
tx_ba_tbl->amsdu = true;
|
||||
else
|
||||
tx_ba_tbl->amsdu = false;
|
||||
if (ra_list) {
|
||||
ra_list->amsdu_in_ampdu = tx_ba_tbl->amsdu;
|
||||
ra_list->ba_status = BA_SETUP_COMPLETE;
|
||||
}
|
||||
} else {
|
||||
dev_err(priv->adapter->dev, "BA stream not created\n");
|
||||
}
|
||||
|
@ -515,6 +526,7 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
|
|||
enum mwifiex_ba_status ba_status)
|
||||
{
|
||||
struct mwifiex_tx_ba_stream_tbl *new_node;
|
||||
struct mwifiex_ra_list_tbl *ra_list;
|
||||
unsigned long flags;
|
||||
|
||||
if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
|
||||
|
@ -522,7 +534,11 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
|
|||
GFP_ATOMIC);
|
||||
if (!new_node)
|
||||
return;
|
||||
|
||||
ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra);
|
||||
if (ra_list) {
|
||||
ra_list->ba_status = ba_status;
|
||||
ra_list->amsdu_in_ampdu = false;
|
||||
}
|
||||
INIT_LIST_HEAD(&new_node->list);
|
||||
|
||||
new_node->tid = tid;
|
||||
|
|
|
@ -77,22 +77,6 @@ mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
|
|||
return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
|
||||
}
|
||||
|
||||
/* This function checks whether AMSDU is allowed for BA stream. */
|
||||
static inline u8
|
||||
mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
|
||||
struct mwifiex_ra_list_tbl *ptr, int tid)
|
||||
{
|
||||
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
|
||||
|
||||
if (is_broadcast_ether_addr(ptr->ra))
|
||||
return false;
|
||||
tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
|
||||
if (tx_tbl)
|
||||
return tx_tbl->amsdu;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* This function checks whether AMPDU is allowed or not for a particular TID. */
|
||||
static inline u8
|
||||
mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
|
||||
|
@ -181,22 +165,6 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function checks whether BA stream is set up or not.
|
||||
*/
|
||||
static inline int
|
||||
mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
|
||||
struct mwifiex_ra_list_tbl *ptr, int tid)
|
||||
{
|
||||
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
|
||||
|
||||
tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
|
||||
if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function checks whether associated station is 11n enabled
|
||||
*/