Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem
Conflicts: net/wireless/nl80211.c
Commit 57ed5cd695
@ -22,6 +22,8 @@
struct bcma_bus;

/* main.c */
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout);
int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,

@ -140,8 +140,15 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
	bcma_core_chipcommon_early_init(cc);

	if (cc->core->id.rev >= 20) {
		bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
		bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0);
		u32 pullup = 0, pulldown = 0;

		if (cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM43142) {
			pullup = 0x402e0;
			pulldown = 0x20500;
		}

		bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, pullup);
		bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, pulldown);
	}

	if (cc->capabilities & BCMA_CC_CAP_PMU)

@ -56,6 +56,109 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);

static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
{
	u32 ilp_ctl, alp_hz;

	if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) &
	      BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
		return 0;

	bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
			BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
	usleep_range(1000, 2000);

	ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
	ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;

	bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);

	alp_hz = ilp_ctl * 32768 / 4;
	return (alp_hz + 50000) / 100000 * 100;
}
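
To make the rounding in the return statement above concrete, here is a worked example; the ilp_ctl readback value is purely hypothetical and is chosen only because it lands on the 20 MHz crystal entry handled in bcma_pmu2_pll_init0() below.

/*
 * Worked example (hypothetical readback): if ilp_ctl = 2441, then
 *   alp_hz = 2441 * 32768 / 4    = 19996672 Hz
 *   (19996672 + 50000) / 100000  = 200        (integer division)
 *   200 * 100                    = 20000
 * so bcma_pmu_xtalfreq() reports 20000 kHz (20 MHz), matching the
 * "case 20000" crystal entry below.
 */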

static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 freq_tgt_target = 0, freq_tgt_current;
	u32 pll0, mask;

	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM43142:
		/* pmu2_xtaltab0_adfll_485 */
		switch (xtalfreq) {
		case 12000:
			freq_tgt_target = 0x50D52;
			break;
		case 20000:
			freq_tgt_target = 0x307FE;
			break;
		case 26000:
			freq_tgt_target = 0x254EA;
			break;
		case 37400:
			freq_tgt_target = 0x19EF8;
			break;
		case 52000:
			freq_tgt_target = 0x12A75;
			break;
		}
		break;
	}

	if (!freq_tgt_target) {
		bcma_err(bus, "Unknown TGT frequency for xtalfreq %d\n",
			 xtalfreq);
		return;
	}

	pll0 = bcma_chipco_pll_read(cc, BCMA_CC_PMU15_PLL_PLLCTL0);
	freq_tgt_current = (pll0 & BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK) >>
		BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT;

	if (freq_tgt_current == freq_tgt_target) {
		bcma_debug(bus, "Target TGT frequency already set\n");
		return;
	}

	/* Turn off PLL */
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM43142:
		mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
			      BCMA_RES_4314_MACPHY_CLK_AVAIL);

		bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
		bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
		bcma_wait_value(cc->core, BCMA_CLKCTLST,
				BCMA_CLKCTLST_HAVEHT, 0, 20000);
		break;
	}

	pll0 &= ~BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK;
	pll0 |= freq_tgt_target << BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT;
	bcma_chipco_pll_write(cc, BCMA_CC_PMU15_PLL_PLLCTL0, pll0);

	/* Flush */
	if (cc->pmu.rev >= 2)
		bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);

	/* TODO: Do we need to update OTP? */
}

static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 xtalfreq = bcma_pmu_xtalfreq(cc);

	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM43142:
		if (xtalfreq == 0)
			xtalfreq = 20000;
		bcma_pmu2_pll_init0(cc, xtalfreq);
		break;
	}
}

static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;

@ -66,6 +169,25 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
		min_msk = 0x200D;
		max_msk = 0xFFFF;
		break;
	case BCMA_CHIP_ID_BCM43142:
		min_msk = BCMA_RES_4314_LPLDO_PU |
			  BCMA_RES_4314_PMU_SLEEP_DIS |
			  BCMA_RES_4314_PMU_BG_PU |
			  BCMA_RES_4314_CBUCK_LPOM_PU |
			  BCMA_RES_4314_CBUCK_PFM_PU |
			  BCMA_RES_4314_CLDO_PU |
			  BCMA_RES_4314_LPLDO2_LVM |
			  BCMA_RES_4314_WL_PMU_PU |
			  BCMA_RES_4314_LDO3P3_PU |
			  BCMA_RES_4314_OTP_PU |
			  BCMA_RES_4314_WL_PWRSW_PU |
			  BCMA_RES_4314_LQ_AVAIL |
			  BCMA_RES_4314_LOGIC_RET |
			  BCMA_RES_4314_MEM_SLEEP |
			  BCMA_RES_4314_MACPHY_RET |
			  BCMA_RES_4314_WL_CORE_READY;
		max_msk = 0x3FFFFFFF;
		break;
	default:
		bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
			   bus->chipinfo.id);

@ -165,6 +287,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
		bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
			      BCMA_CC_PMU_CTL_NOILPONW);

	bcma_pmu_pll_init(cc);
	bcma_pmu_resources_init(cc);
	bcma_pmu_workarounds(cc);
}

@ -275,6 +275,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ 0, },
};

@ -93,6 +93,25 @@ struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
	return NULL;
}

bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}
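
A minimal usage sketch for this polling helper follows; it simply mirrors the BCM43142 call already made in bcma_pmu2_pll_init0() above. Note that the timeout argument is added directly to jiffies, so it is a jiffies count, while the poll interval is a fixed 10 microseconds (udelay(10)).

	/* Example (mirroring the PLL shutdown path above): wait up to
	 * 20000 jiffies for the HT-available bit to clear; the helper
	 * itself prints a warning and returns false on timeout.
	 */
	bcma_wait_value(cc->core, BCMA_CLKCTLST,
			BCMA_CLKCTLST_HAVEHT, 0, 20000);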

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

@ -503,6 +503,7 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
	case BCMA_CHIP_ID_BCM4331:
		present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
		break;
	case BCMA_CHIP_ID_BCM43142:
	case BCMA_CHIP_ID_BCM43224:
	case BCMA_CHIP_ID_BCM43225:
		/* for these chips OTP is always available */

@ -1619,6 +1619,7 @@ static struct usb_driver btusb_driver = {
#ifdef CONFIG_PM
	.suspend = btusb_suspend,
	.resume = btusb_resume,
	.reset_resume = btusb_resume,
#endif
	.id_table = btusb_table,
	.supports_autosuspend = 1,

@ -54,7 +54,6 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
|
|||
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
|
||||
arg.key_cipher = WMI_CIPHER_TKIP;
|
||||
arg.key_txmic_len = 8;
|
||||
arg.key_rxmic_len = 8;
|
||||
|
@ -237,6 +236,8 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
|
|||
case NL80211_CHAN_WIDTH_40:
|
||||
phymode = MODE_11NG_HT40;
|
||||
break;
|
||||
case NL80211_CHAN_WIDTH_5:
|
||||
case NL80211_CHAN_WIDTH_10:
|
||||
case NL80211_CHAN_WIDTH_80:
|
||||
case NL80211_CHAN_WIDTH_80P80:
|
||||
case NL80211_CHAN_WIDTH_160:
|
||||
|
@ -258,6 +259,8 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
|
|||
case NL80211_CHAN_WIDTH_80:
|
||||
phymode = MODE_11AC_VHT80;
|
||||
break;
|
||||
case NL80211_CHAN_WIDTH_5:
|
||||
case NL80211_CHAN_WIDTH_10:
|
||||
case NL80211_CHAN_WIDTH_80P80:
|
||||
case NL80211_CHAN_WIDTH_160:
|
||||
phymode = MODE_UNKNOWN;
|
||||
|
@ -2721,30 +2724,30 @@ static const struct ieee80211_channel ath10k_2ghz_channels[] = {
|
|||
};
|
||||
|
||||
static const struct ieee80211_channel ath10k_5ghz_channels[] = {
|
||||
CHAN5G(36, 5180, 14),
|
||||
CHAN5G(40, 5200, 15),
|
||||
CHAN5G(44, 5220, 16),
|
||||
CHAN5G(48, 5240, 17),
|
||||
CHAN5G(52, 5260, 18),
|
||||
CHAN5G(56, 5280, 19),
|
||||
CHAN5G(60, 5300, 20),
|
||||
CHAN5G(64, 5320, 21),
|
||||
CHAN5G(100, 5500, 22),
|
||||
CHAN5G(104, 5520, 23),
|
||||
CHAN5G(108, 5540, 24),
|
||||
CHAN5G(112, 5560, 25),
|
||||
CHAN5G(116, 5580, 26),
|
||||
CHAN5G(120, 5600, 27),
|
||||
CHAN5G(124, 5620, 28),
|
||||
CHAN5G(128, 5640, 29),
|
||||
CHAN5G(132, 5660, 30),
|
||||
CHAN5G(136, 5680, 31),
|
||||
CHAN5G(140, 5700, 32),
|
||||
CHAN5G(149, 5745, 33),
|
||||
CHAN5G(153, 5765, 34),
|
||||
CHAN5G(157, 5785, 35),
|
||||
CHAN5G(161, 5805, 36),
|
||||
CHAN5G(165, 5825, 37),
|
||||
CHAN5G(36, 5180, 0),
|
||||
CHAN5G(40, 5200, 0),
|
||||
CHAN5G(44, 5220, 0),
|
||||
CHAN5G(48, 5240, 0),
|
||||
CHAN5G(52, 5260, 0),
|
||||
CHAN5G(56, 5280, 0),
|
||||
CHAN5G(60, 5300, 0),
|
||||
CHAN5G(64, 5320, 0),
|
||||
CHAN5G(100, 5500, 0),
|
||||
CHAN5G(104, 5520, 0),
|
||||
CHAN5G(108, 5540, 0),
|
||||
CHAN5G(112, 5560, 0),
|
||||
CHAN5G(116, 5580, 0),
|
||||
CHAN5G(120, 5600, 0),
|
||||
CHAN5G(124, 5620, 0),
|
||||
CHAN5G(128, 5640, 0),
|
||||
CHAN5G(132, 5660, 0),
|
||||
CHAN5G(136, 5680, 0),
|
||||
CHAN5G(140, 5700, 0),
|
||||
CHAN5G(149, 5745, 0),
|
||||
CHAN5G(153, 5765, 0),
|
||||
CHAN5G(157, 5785, 0),
|
||||
CHAN5G(161, 5805, 0),
|
||||
CHAN5G(165, 5825, 0),
|
||||
};
|
||||
|
||||
static struct ieee80211_rate ath10k_rates[] = {
|
||||
|
|
|
@ -1883,9 +1883,10 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
|
|||
ath10k_warn("request_irq(%d) failed %d\n",
|
||||
ar_pci->pdev->irq + i, ret);
|
||||
|
||||
for (; i >= MSI_ASSIGN_CE_INITIAL; i--)
|
||||
free_irq(ar_pci->pdev->irq, ar);
|
||||
for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
|
||||
free_irq(ar_pci->pdev->irq + i, ar);
|
||||
|
||||
free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
|
||||
pci_disable_msi(ar_pci->pdev);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -3606,7 +3606,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
|
|||
* 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
|
||||
* SWITCH_TABLE_COM_SPDT_WLAN_IDLE
|
||||
*/
|
||||
if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
|
||||
if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
|
||||
value = ar9003_switch_com_spdt_get(ah, is2ghz);
|
||||
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
|
||||
AR_SWITCH_TABLE_COM_SPDT_ALL, value);
|
||||
|
@ -4059,8 +4059,9 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
|
|||
{
|
||||
u32 data, ko, kg;
|
||||
|
||||
if (!AR_SREV_9462_20(ah))
|
||||
if (!AR_SREV_9462_20_OR_LATER(ah))
|
||||
return;
|
||||
|
||||
ar9300_otp_read_word(ah, 1, &data);
|
||||
ko = data & 0xff;
|
||||
kg = (data >> 8) & 0xff;
|
||||
|
@ -4752,7 +4753,7 @@ tempslope:
|
|||
AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
|
||||
}
|
||||
|
||||
if (AR_SREV_9462_20(ah))
|
||||
if (AR_SREV_9462_20_OR_LATER(ah))
|
||||
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
|
||||
AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include "ar955x_1p0_initvals.h"
|
||||
#include "ar9580_1p0_initvals.h"
|
||||
#include "ar9462_2p0_initvals.h"
|
||||
#include "ar9462_2p1_initvals.h"
|
||||
#include "ar9565_1p0_initvals.h"
|
||||
|
||||
/* General hardware code for the AR9003 hadware family */
|
||||
|
@ -197,6 +198,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
|
|||
|
||||
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
|
||||
ar9485_1_1_pcie_phy_clkreq_disable_L1);
|
||||
} else if (AR_SREV_9462_21(ah)) {
|
||||
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
|
||||
ar9462_2p1_mac_core);
|
||||
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
|
||||
ar9462_2p1_mac_postamble);
|
||||
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
|
||||
ar9462_2p1_baseband_core);
|
||||
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
|
||||
ar9462_2p1_baseband_postamble);
|
||||
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
|
||||
ar9462_2p1_radio_core);
|
||||
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
|
||||
ar9462_2p1_radio_postamble);
|
||||
INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
|
||||
ar9462_2p1_radio_postamble_sys2ant);
|
||||
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
|
||||
ar9462_2p1_soc_preamble);
|
||||
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
|
||||
ar9462_2p1_soc_postamble);
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_2p1_common_rx_gain);
|
||||
INIT_INI_ARRAY(&ah->iniModesFastClock,
|
||||
ar9462_2p1_modes_fast_clock);
|
||||
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
|
||||
ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
|
||||
} else if (AR_SREV_9462_20(ah)) {
|
||||
|
||||
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
|
||||
|
@ -407,6 +433,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
|
|||
else if (AR_SREV_9580(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9580_1p0_lowest_ob_db_tx_gain_table);
|
||||
else if (AR_SREV_9462_21(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9462_2p1_modes_low_ob_db_tx_gain);
|
||||
else if (AR_SREV_9462_20(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9462_modes_low_ob_db_tx_gain_table_2p0);
|
||||
|
@ -438,6 +467,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
|
|||
else if (AR_SREV_9550(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar955x_1p0_modes_no_xpa_tx_gain_table);
|
||||
else if (AR_SREV_9462_21(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9462_2p1_modes_high_ob_db_tx_gain);
|
||||
else if (AR_SREV_9462_20(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9462_modes_high_ob_db_tx_gain_table_2p0);
|
||||
|
@ -507,6 +539,12 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
|
|||
else if (AR_SREV_9580(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9580_1p0_mixed_ob_db_tx_gain_table);
|
||||
else if (AR_SREV_9462_21(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9462_2p1_modes_mix_ob_db_tx_gain);
|
||||
else if (AR_SREV_9462_20(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9462_modes_mix_ob_db_tx_gain_table_2p0);
|
||||
else
|
||||
INIT_INI_ARRAY(&ah->iniModesTxGain,
|
||||
ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
|
||||
|
@ -584,6 +622,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
|
|||
} else if (AR_SREV_9580(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9580_1p0_rx_gain_table);
|
||||
else if (AR_SREV_9462_21(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_2p1_common_rx_gain);
|
||||
else if (AR_SREV_9462_20(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_common_rx_gain_table_2p0);
|
||||
|
@ -606,6 +647,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
|
|||
else if (AR_SREV_9485_11(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9485Common_wo_xlna_rx_gain_1_1);
|
||||
else if (AR_SREV_9462_21(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_2p1_common_wo_xlna_rx_gain);
|
||||
else if (AR_SREV_9462_20(ah))
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_common_wo_xlna_rx_gain_table_2p0);
|
||||
|
@ -627,7 +671,16 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
|
|||
|
||||
static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
|
||||
{
|
||||
if (AR_SREV_9462_20(ah)) {
|
||||
if (AR_SREV_9462_21(ah)) {
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_2p1_common_mixed_rx_gain);
|
||||
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
|
||||
ar9462_2p1_baseband_core_mix_rxgain);
|
||||
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
|
||||
ar9462_2p1_baseband_postamble_mix_rxgain);
|
||||
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
|
||||
ar9462_2p1_baseband_postamble_5g_xlna);
|
||||
} else if (AR_SREV_9462_20(ah)) {
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_common_mixed_rx_gain_table_2p0);
|
||||
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
|
||||
|
@ -641,7 +694,12 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
|
|||
|
||||
static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
|
||||
{
|
||||
if (AR_SREV_9462_20(ah)) {
|
||||
if (AR_SREV_9462_21(ah)) {
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_2p1_common_5g_xlna_only_rx_gain);
|
||||
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
|
||||
ar9462_2p1_baseband_postamble_5g_xlna);
|
||||
} else if (AR_SREV_9462_20(ah)) {
|
||||
INIT_INI_ARRAY(&ah->iniModesRxGain,
|
||||
ar9462_2p0_5g_xlna_only_rxgain);
|
||||
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
|
||||
|
|
|
@ -743,7 +743,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
|
|||
ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
|
||||
ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
|
||||
ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
|
||||
if (i == ATH_INI_POST && AR_SREV_9462_20(ah))
|
||||
if (i == ATH_INI_POST && AR_SREV_9462_20_OR_LATER(ah))
|
||||
ar9003_hw_prog_ini(ah,
|
||||
&ah->ini_radio_post_sys2ant,
|
||||
modesIndex);
|
||||
|
@ -754,7 +754,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
|
|||
*/
|
||||
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
|
||||
|
||||
if (AR_SREV_9462_20(ah)) {
|
||||
if (AR_SREV_9462_20_OR_LATER(ah)) {
|
||||
/*
|
||||
* CUS217 mix LNA mode.
|
||||
*/
|
||||
|
@ -1512,7 +1512,7 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
|
|||
ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
|
||||
ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
|
||||
|
||||
if (AR_SREV_9462_20(ah))
|
||||
if (AR_SREV_9462_20_OR_LATER(ah))
|
||||
ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
|
||||
modesIndex);
|
||||
|
||||
|
|
|
@ -954,7 +954,7 @@
|
|||
#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
|
||||
#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
|
||||
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
|
||||
#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \
|
||||
#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
|
||||
0x280 : 0x240))
|
||||
#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
|
||||
#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
|
||||
|
@ -1048,7 +1048,7 @@
|
|||
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
|
||||
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
|
||||
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
|
||||
(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
|
||||
(AR_SREV_9462_20_OR_LATER(_ah) ? 0x4c : 0x50))
|
||||
#define AR_GLB_STATUS (AR_GLB_BASE + 0x48)
|
||||
|
||||
/*
|
||||
|
|
|
@ -879,6 +879,69 @@ static const u32 ar9462_2p0_radio_postamble[][5] = {
|
|||
{0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
|
||||
};
|
||||
|
||||
static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
|
||||
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
|
||||
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
|
||||
{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
|
||||
{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
|
||||
{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
|
||||
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
|
||||
{0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
|
||||
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
|
||||
{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
|
||||
{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
|
||||
{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
|
||||
{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
|
||||
{0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
|
||||
{0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
|
||||
{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
|
||||
{0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
|
||||
{0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
|
||||
{0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
|
||||
{0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
|
||||
{0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
|
||||
{0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
|
||||
{0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
|
||||
{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
|
||||
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
|
||||
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
|
||||
{0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
|
||||
{0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
|
||||
{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
|
||||
{0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
|
||||
{0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
|
||||
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
|
||||
{0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
|
||||
{0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
|
||||
{0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
|
||||
{0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
|
||||
{0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
|
||||
{0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
|
||||
{0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
|
||||
{0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
|
||||
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
|
||||
{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
|
||||
{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
|
||||
{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
|
||||
{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
|
||||
{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
|
||||
{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
|
||||
{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
|
||||
{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
|
||||
{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
|
||||
{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
|
||||
{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
|
||||
{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
|
||||
{0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
|
||||
{0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
|
||||
{0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
|
||||
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
|
||||
};
|
||||
|
||||
static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
|
||||
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
|
||||
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
|
||||
|
|
The diff for this file is not shown because of its large size.
|
@ -634,6 +634,7 @@ void ath_ant_comb_update(struct ath_softc *sc);
|
|||
#define ATH9K_PCI_CUS198 0x0001
|
||||
#define ATH9K_PCI_CUS230 0x0002
|
||||
#define ATH9K_PCI_CUS217 0x0004
|
||||
#define ATH9K_PCI_WOW 0x0008
|
||||
|
||||
/*
|
||||
* Default cache line size, in bytes.
|
||||
|
|
|
@ -234,10 +234,15 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
|
|||
struct sk_buff *skb;
|
||||
|
||||
while ((skb = __skb_dequeue(queue)) != NULL) {
|
||||
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
|
||||
int ln = skb->len;
|
||||
#endif
|
||||
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
|
||||
skb, txok);
|
||||
if (txok)
|
||||
if (txok) {
|
||||
TX_STAT_INC(skb_success);
|
||||
TX_STAT_ADD(skb_success_bytes, ln);
|
||||
}
|
||||
else
|
||||
TX_STAT_INC(skb_failed);
|
||||
}
|
||||
|
@ -620,6 +625,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
|
|||
|
||||
err:
|
||||
for (i = 0; i < pool_index; i++) {
|
||||
RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
|
||||
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
|
||||
skb_pool[i]->len, USB_WLAN_RX_PIPE);
|
||||
RX_STAT_INC(skb_completed);
|
||||
|
|
|
@ -324,7 +324,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
|
|||
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
|
||||
|
||||
#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
|
||||
#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
|
||||
#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
|
||||
#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
|
||||
#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
|
||||
|
||||
#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
|
||||
|
@ -337,6 +339,7 @@ struct ath_tx_stats {
|
|||
u32 buf_completed;
|
||||
u32 skb_queued;
|
||||
u32 skb_success;
|
||||
u32 skb_success_bytes;
|
||||
u32 skb_failed;
|
||||
u32 cab_queued;
|
||||
u32 queue_stats[IEEE80211_NUM_ACS];
|
||||
|
@ -345,6 +348,7 @@ struct ath_tx_stats {
|
|||
struct ath_rx_stats {
|
||||
u32 skb_allocated;
|
||||
u32 skb_completed;
|
||||
u32 skb_completed_bytes;
|
||||
u32 skb_dropped;
|
||||
u32 err_crc;
|
||||
u32 err_decrypt_crc;
|
||||
|
@ -362,10 +366,20 @@ struct ath9k_debug {
|
|||
struct ath_rx_stats rx_stats;
|
||||
};
|
||||
|
||||
void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
u32 sset, u8 *data);
|
||||
int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, int sset);
|
||||
void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ethtool_stats *stats, u64 *data);
|
||||
#else
|
||||
|
||||
#define TX_STAT_INC(c) do { } while (0)
|
||||
#define TX_STAT_ADD(c, a) do { } while (0)
|
||||
#define RX_STAT_INC(c) do { } while (0)
|
||||
#define RX_STAT_ADD(c, a) do { } while (0)
|
||||
#define CAB_STAT_INC do { } while (0)
|
||||
|
||||
#define TX_QSTAT_INC(c) do { } while (0)
|
||||
|
@ -583,6 +597,8 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
|
|||
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
|
||||
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
|
||||
|
||||
struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
|
||||
|
||||
#ifdef CONFIG_MAC80211_LEDS
|
||||
void ath9k_init_leds(struct ath9k_htc_priv *priv);
|
||||
void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
|
||||
|
|
|
@ -496,21 +496,7 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
|
|||
ssize_t retval = 0;
|
||||
char *buf;
|
||||
|
||||
/*
|
||||
* This can be done since all the 3 EEPROM families have the
|
||||
* same base header upto a certain point, and we are interested in
|
||||
* the data only upto that point.
|
||||
*/
|
||||
|
||||
if (AR_SREV_9271(priv->ah))
|
||||
pBase = (struct base_eep_header *)
|
||||
&priv->ah->eeprom.map4k.baseEepHeader;
|
||||
else if (priv->ah->hw_version.usbdev == AR9280_USB)
|
||||
pBase = (struct base_eep_header *)
|
||||
&priv->ah->eeprom.def.baseEepHeader;
|
||||
else if (priv->ah->hw_version.usbdev == AR9287_USB)
|
||||
pBase = (struct base_eep_header *)
|
||||
&priv->ah->eeprom.map9287.baseEepHeader;
|
||||
pBase = ath9k_htc_get_eeprom_base(priv);
|
||||
|
||||
if (pBase == NULL) {
|
||||
ath_err(common, "Unknown EEPROM type\n");
|
||||
|
@ -916,6 +902,87 @@ static const struct file_operations fops_modal_eeprom = {
|
|||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
|
||||
/* Ethtool support for get-stats */
|
||||
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
|
||||
static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
|
||||
"tx_pkts_nic",
|
||||
"tx_bytes_nic",
|
||||
"rx_pkts_nic",
|
||||
"rx_bytes_nic",
|
||||
|
||||
AMKSTR(d_tx_pkts),
|
||||
|
||||
"d_rx_crc_err",
|
||||
"d_rx_decrypt_crc_err",
|
||||
"d_rx_phy_err",
|
||||
"d_rx_mic_err",
|
||||
"d_rx_pre_delim_crc_err",
|
||||
"d_rx_post_delim_crc_err",
|
||||
"d_rx_decrypt_busy_err",
|
||||
|
||||
"d_rx_phyerr_radar",
|
||||
"d_rx_phyerr_ofdm_timing",
|
||||
"d_rx_phyerr_cck_timing",
|
||||
|
||||
};
|
||||
#define ATH9K_HTC_SSTATS_LEN ARRAY_SIZE(ath9k_htc_gstrings_stats)
|
||||
|
||||
void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
u32 sset, u8 *data)
|
||||
{
|
||||
if (sset == ETH_SS_STATS)
|
||||
memcpy(data, *ath9k_htc_gstrings_stats,
|
||||
sizeof(ath9k_htc_gstrings_stats));
|
||||
}
|
||||
|
||||
int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, int sset)
|
||||
{
|
||||
if (sset == ETH_SS_STATS)
|
||||
return ATH9K_HTC_SSTATS_LEN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define STXBASE priv->debug.tx_stats
|
||||
#define SRXBASE priv->debug.rx_stats
|
||||
#define ASTXQ(a) \
|
||||
data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
|
||||
data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
|
||||
data[i++] = STXBASE.a[IEEE80211_AC_VI]; \
|
||||
data[i++] = STXBASE.a[IEEE80211_AC_VO]
|
||||
|
||||
void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ethtool_stats *stats, u64 *data)
|
||||
{
|
||||
struct ath9k_htc_priv *priv = hw->priv;
|
||||
int i = 0;
|
||||
|
||||
data[i++] = STXBASE.skb_success;
|
||||
data[i++] = STXBASE.skb_success_bytes;
|
||||
data[i++] = SRXBASE.skb_completed;
|
||||
data[i++] = SRXBASE.skb_completed_bytes;
|
||||
|
||||
ASTXQ(queue_stats);
|
||||
|
||||
data[i++] = SRXBASE.err_crc;
|
||||
data[i++] = SRXBASE.err_decrypt_crc;
|
||||
data[i++] = SRXBASE.err_phy;
|
||||
data[i++] = SRXBASE.err_mic;
|
||||
data[i++] = SRXBASE.err_pre_delim;
|
||||
data[i++] = SRXBASE.err_post_delim;
|
||||
data[i++] = SRXBASE.err_decrypt_busy;
|
||||
|
||||
data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
|
||||
data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
|
||||
data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
|
||||
|
||||
WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
|
||||
}
|
||||
|
||||
|
||||
int ath9k_htc_init_debug(struct ath_hw *ah)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
|
|
|
@ -701,8 +701,10 @@ static const struct ieee80211_iface_limit if_limits[] = {
|
|||
{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
|
||||
BIT(NL80211_IFTYPE_P2P_CLIENT) },
|
||||
{ .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
|
||||
BIT(NL80211_IFTYPE_P2P_GO) |
|
||||
BIT(NL80211_IFTYPE_MESH_POINT) },
|
||||
#ifdef CONFIG_MAC80211_MESH
|
||||
BIT(NL80211_IFTYPE_MESH_POINT) |
|
||||
#endif
|
||||
BIT(NL80211_IFTYPE_P2P_GO) },
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_combination if_comb = {
|
||||
|
@ -716,6 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
|
|||
struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath_common *common = ath9k_hw_common(priv->ah);
|
||||
struct base_eep_header *pBase;
|
||||
|
||||
hw->flags = IEEE80211_HW_SIGNAL_DBM |
|
||||
IEEE80211_HW_AMPDU_AGGREGATION |
|
||||
|
@ -771,6 +774,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
|
|||
&priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
|
||||
}
|
||||
|
||||
pBase = ath9k_htc_get_eeprom_base(priv);
|
||||
if (pBase) {
|
||||
hw->wiphy->available_antennas_rx = pBase->rxMask;
|
||||
hw->wiphy->available_antennas_tx = pBase->txMask;
|
||||
}
|
||||
|
||||
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
|
||||
}
|
||||
|
||||
|
|
|
@ -1183,7 +1183,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
|
|||
mutex_lock(&priv->htc_pm_lock);
|
||||
|
||||
priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
|
||||
if (priv->ps_idle)
|
||||
if (!priv->ps_idle)
|
||||
chip_reset = true;
|
||||
|
||||
mutex_unlock(&priv->htc_pm_lock);
|
||||
|
@ -1774,6 +1774,43 @@ static int ath9k_htc_get_stats(struct ieee80211_hw *hw,
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv)
|
||||
{
|
||||
struct base_eep_header *pBase = NULL;
|
||||
/*
|
||||
* This can be done since all the 3 EEPROM families have the
|
||||
* same base header upto a certain point, and we are interested in
|
||||
* the data only upto that point.
|
||||
*/
|
||||
|
||||
if (AR_SREV_9271(priv->ah))
|
||||
pBase = (struct base_eep_header *)
|
||||
&priv->ah->eeprom.map4k.baseEepHeader;
|
||||
else if (priv->ah->hw_version.usbdev == AR9280_USB)
|
||||
pBase = (struct base_eep_header *)
|
||||
&priv->ah->eeprom.def.baseEepHeader;
|
||||
else if (priv->ah->hw_version.usbdev == AR9287_USB)
|
||||
pBase = (struct base_eep_header *)
|
||||
&priv->ah->eeprom.map9287.baseEepHeader;
|
||||
return pBase;
|
||||
}
|
||||
|
||||
|
||||
static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
|
||||
u32 *rx_ant)
|
||||
{
|
||||
struct ath9k_htc_priv *priv = hw->priv;
|
||||
struct base_eep_header *pBase = ath9k_htc_get_eeprom_base(priv);
|
||||
if (pBase) {
|
||||
*tx_ant = pBase->txMask;
|
||||
*rx_ant = pBase->rxMask;
|
||||
} else {
|
||||
*tx_ant = 0;
|
||||
*rx_ant = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ieee80211_ops ath9k_htc_ops = {
|
||||
.tx = ath9k_htc_tx,
|
||||
.start = ath9k_htc_start,
|
||||
|
@ -1799,4 +1836,11 @@ struct ieee80211_ops ath9k_htc_ops = {
|
|||
.set_coverage_class = ath9k_htc_set_coverage_class,
|
||||
.set_bitrate_mask = ath9k_htc_set_bitrate_mask,
|
||||
.get_stats = ath9k_htc_get_stats,
|
||||
.get_antenna = ath9k_htc_get_antenna,
|
||||
|
||||
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
|
||||
.get_et_sset_count = ath9k_htc_get_et_sset_count,
|
||||
.get_et_stats = ath9k_htc_get_et_stats,
|
||||
.get_et_strings = ath9k_htc_get_et_strings,
|
||||
#endif
|
||||
};
|
||||
|
|
|
@ -2599,7 +2599,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
|
|||
if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_MCI;
|
||||
|
||||
if (AR_SREV_9462_20(ah))
|
||||
if (AR_SREV_9462_20_OR_LATER(ah))
|
||||
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
|
||||
}
|
||||
|
||||
|
|
|
@ -837,6 +837,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
|
|||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
|
||||
(sc->driver_data & ATH9K_PCI_WOW) &&
|
||||
device_can_wakeup(sc->dev))
|
||||
hw->wiphy->wowlan = &ath9k_wowlan_support;
|
||||
|
||||
|
|
|
@ -79,6 +79,63 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
|
|||
0x6661),
|
||||
.driver_data = ATH9K_PCI_CUS217 },
|
||||
|
||||
/* AR9462 with WoW support */
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
PCI_VENDOR_ID_ATHEROS,
|
||||
0x3117),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
PCI_VENDOR_ID_LENOVO,
|
||||
0x3214),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
PCI_VENDOR_ID_ATTANSIC,
|
||||
0x0091),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
PCI_VENDOR_ID_AZWAVE,
|
||||
0x2110),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
PCI_VENDOR_ID_ASUSTEK,
|
||||
0x850E),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
0x11AD, /* LITEON */
|
||||
0x6631),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
0x11AD, /* LITEON */
|
||||
0x6641),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
PCI_VENDOR_ID_HP,
|
||||
0x1864),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
0x14CD, /* USI */
|
||||
0x0063),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
0x14CD, /* USI */
|
||||
0x0064),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
|
||||
0x0034,
|
||||
0x10CF, /* Fujitsu */
|
||||
0x1783),
|
||||
.driver_data = ATH9K_PCI_WOW },
|
||||
|
||||
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
|
||||
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
|
||||
{ PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
|
||||
|
|
|
@ -806,6 +806,7 @@
|
|||
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
|
||||
#define AR_SREV_VERSION_9462 0x280
|
||||
#define AR_SREV_REVISION_9462_20 2
|
||||
#define AR_SREV_REVISION_9462_21 3
|
||||
#define AR_SREV_VERSION_9565 0x2C0
|
||||
#define AR_SREV_REVISION_9565_10 0
|
||||
#define AR_SREV_VERSION_9550 0x400
|
||||
|
@ -911,10 +912,18 @@
|
|||
|
||||
#define AR_SREV_9462(_ah) \
|
||||
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
|
||||
|
||||
#define AR_SREV_9462_20(_ah) \
|
||||
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
|
||||
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
|
||||
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
|
||||
#define AR_SREV_9462_21(_ah) \
|
||||
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
|
||||
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
|
||||
#define AR_SREV_9462_20_OR_LATER(_ah) \
|
||||
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
|
||||
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
|
||||
#define AR_SREV_9462_21_OR_LATER(_ah) \
|
||||
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
|
||||
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
|
||||
|
||||
#define AR_SREV_9565(_ah) \
|
||||
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
|
||||
|
|
|
@ -1673,6 +1673,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
|
|||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
|
||||
last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
|
||||
|
||||
|
@ -1711,8 +1713,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
|
|||
|
||||
if (ac == last_ac ||
|
||||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
|
||||
return;
|
||||
break;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/***********/
|
||||
|
@ -1778,9 +1782,13 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
|
|||
}
|
||||
|
||||
if (!internal) {
|
||||
txq->axq_depth++;
|
||||
if (bf_is_ampdu_not_probing(bf))
|
||||
txq->axq_ampdu_depth++;
|
||||
while (bf) {
|
||||
txq->axq_depth++;
|
||||
if (bf_is_ampdu_not_probing(bf))
|
||||
txq->axq_ampdu_depth++;
|
||||
|
||||
bf = bf->bf_lastbf->bf_next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -621,7 +621,8 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len)
|
||||
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
|
||||
int vring_index)
|
||||
{
|
||||
wil_desc_addr_set(&d->dma.addr, pa);
|
||||
d->dma.ip_length = 0;
|
||||
|
@ -630,7 +631,7 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len)
|
|||
d->dma.error = 0;
|
||||
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
|
||||
d->dma.length = cpu_to_le16((u16)len);
|
||||
d->dma.d0 = 0;
|
||||
d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
|
||||
d->mac.d[0] = 0;
|
||||
d->mac.d[1] = 0;
|
||||
d->mac.d[2] = 0;
|
||||
|
@ -684,7 +685,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
if (unlikely(dma_mapping_error(dev, pa)))
|
||||
return -EINVAL;
|
||||
/* 1-st segment */
|
||||
wil_tx_desc_map(d, pa, skb_headlen(skb));
|
||||
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
|
||||
d->mac.d[2] |= ((nr_frags + 1) <<
|
||||
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
|
||||
if (nr_frags)
|
||||
|
@ -701,15 +702,14 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
|||
DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(dev, pa)))
|
||||
goto dma_error;
|
||||
wil_tx_desc_map(d, pa, len);
|
||||
wil_tx_desc_map(d, pa, len, vring_index);
|
||||
vring->ctx[i] = NULL;
|
||||
*_d = *d;
|
||||
}
|
||||
/* for the last seg only */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
|
||||
d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
|
||||
d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
|
||||
*_d = *d;
|
||||
|
||||
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
|
||||
|
|
|
@ -201,6 +201,10 @@ struct vring_tx_mac {
|
|||
#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
|
||||
#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
|
||||
|
||||
#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS 9
|
||||
#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_LEN 1
|
||||
#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_MSK 0x200
|
||||
|
||||
#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
|
||||
#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
|
||||
#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
|
||||
|
|
|
@ -31,12 +31,6 @@ config B43_BCMA
|
|||
depends on B43 && (BCMA = y || BCMA = B43)
|
||||
default y
|
||||
|
||||
config B43_BCMA_EXTRA
|
||||
bool "Hardware support that overlaps with the brcmsmac driver"
|
||||
depends on B43_BCMA
|
||||
default n if BRCMSMAC
|
||||
default y
|
||||
|
||||
config B43_SSB
|
||||
bool
|
||||
depends on B43 && (SSB = y || SSB = B43)
|
||||
|
|
|
@ -113,13 +113,15 @@ static int b43_modparam_pio = 0;
|
|||
module_param_named(pio, b43_modparam_pio, int, 0644);
|
||||
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
|
||||
|
||||
static int modparam_allhwsupport = !IS_ENABLED(CONFIG_BRCMSMAC);
|
||||
module_param_named(allhwsupport, modparam_allhwsupport, int, 0444);
|
||||
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the brcmsmac driver)");
|
||||
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
static const struct bcma_device_id b43_bcma_tbl[] = {
|
||||
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
|
||||
#ifdef CONFIG_B43_BCMA_EXTRA
|
||||
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
|
||||
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
|
||||
#endif
|
||||
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
|
||||
BCMA_CORETABLE_END
|
||||
};
|
||||
|
@ -5396,6 +5398,12 @@ static int b43_bcma_probe(struct bcma_device *core)
|
|||
struct b43_wl *wl;
|
||||
int err;
|
||||
|
||||
if (!modparam_allhwsupport &&
|
||||
(core->id.rev == 0x17 || core->id.rev == 0x18)) {
|
||||
pr_err("Support for cores revisions 0x17 and 0x18 disabled by module param allhwsupport=0. Try b43.allhwsupport=1\n");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
dev = b43_bus_dev_bcma_init(core);
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
|
|
@ -162,7 +162,7 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
static int
|
||||
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
|
||||
{
|
||||
int err = 0, i;
|
||||
|
@ -193,12 +193,33 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
|
||||
{
|
||||
uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
|
||||
int err = 0;
|
||||
|
||||
if (bar0 != sdiodev->sbwad) {
|
||||
err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
sdiodev->sbwad = bar0;
|
||||
}
|
||||
|
||||
*addr &= SBSDIO_SB_OFT_ADDR_MASK;
|
||||
|
||||
if (width == 4)
|
||||
*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
|
||||
void *data, bool write)
|
||||
{
|
||||
u8 func_num, reg_size;
|
||||
u32 bar;
|
||||
s32 retry = 0;
|
||||
int ret;
|
||||
|
||||
|
@ -218,18 +239,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
|
|||
func_num = SDIO_FUNC_1;
|
||||
reg_size = 4;
|
||||
|
||||
/* Set the window for SB core register */
|
||||
bar = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
|
||||
if (bar != sdiodev->sbwad) {
|
||||
ret = brcmf_sdcard_set_sbaddr_window(sdiodev, bar);
|
||||
if (ret != 0) {
|
||||
memset(data, 0xFF, reg_size);
|
||||
return ret;
|
||||
}
|
||||
sdiodev->sbwad = bar;
|
||||
}
|
||||
addr &= SBSDIO_SB_OFT_ADDR_MASK;
|
||||
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
|
||||
brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
|
||||
}
|
||||
|
||||
do {
|
||||
|
@ -321,10 +331,11 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
|
|||
bool write, u32 addr, struct sk_buff_head *pktlist)
|
||||
{
|
||||
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
|
||||
unsigned int max_blks, max_req_sz;
|
||||
unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
|
||||
unsigned short max_seg_sz, seg_sz;
|
||||
unsigned char *pkt_data;
|
||||
struct sk_buff *pkt_next = NULL;
|
||||
unsigned char *pkt_data, *orig_data, *dst_data;
|
||||
struct sk_buff *pkt_next = NULL, *local_pkt_next;
|
||||
struct sk_buff_head local_list, *target_list;
|
||||
struct mmc_request mmc_req;
|
||||
struct mmc_command mmc_cmd;
|
||||
struct mmc_data mmc_dat;
|
||||
|
@ -361,6 +372,32 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
|
|||
req_sz);
|
||||
}
|
||||
|
||||
target_list = pktlist;
|
||||
/* for host with broken sg support, prepare a page aligned list */
|
||||
__skb_queue_head_init(&local_list);
|
||||
if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
|
||||
req_sz = 0;
|
||||
skb_queue_walk(pktlist, pkt_next)
|
||||
req_sz += pkt_next->len;
|
||||
req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
|
||||
while (req_sz > PAGE_SIZE) {
|
||||
pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
|
||||
if (pkt_next == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
__skb_queue_tail(&local_list, pkt_next);
|
||||
req_sz -= PAGE_SIZE;
|
||||
}
|
||||
pkt_next = brcmu_pkt_buf_get_skb(req_sz);
|
||||
if (pkt_next == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
__skb_queue_tail(&local_list, pkt_next);
|
||||
target_list = &local_list;
|
||||
}
|
||||
|
||||
host = sdiodev->func[fn]->card->host;
|
||||
func_blk_sz = sdiodev->func[fn]->cur_blksize;
|
||||
/* Blocks per command is limited by host count, host transfer
|
||||
|
@ -370,13 +407,15 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
|
|||
max_req_sz = min_t(unsigned int, host->max_req_size,
|
||||
max_blks * func_blk_sz);
|
||||
max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
|
||||
max_seg_sz = min_t(unsigned short, max_seg_sz, pktlist->qlen);
|
||||
seg_sz = pktlist->qlen;
|
||||
max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
|
||||
seg_sz = target_list->qlen;
|
||||
pkt_offset = 0;
|
||||
pkt_next = pktlist->next;
|
||||
pkt_next = target_list->next;
|
||||
|
||||
if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
while (seg_sz) {
|
||||
req_sz = 0;
|
||||
|
@ -386,7 +425,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
|
|||
memset(&mmc_dat, 0, sizeof(struct mmc_data));
|
||||
sgl = st.sgl;
|
||||
/* prep sg table */
|
||||
while (pkt_next != (struct sk_buff *)pktlist) {
|
||||
while (pkt_next != (struct sk_buff *)target_list) {
|
||||
pkt_data = pkt_next->data + pkt_offset;
|
||||
sg_data_sz = pkt_next->len - pkt_offset;
|
||||
if (sg_data_sz > host->max_seg_size)
|
||||
|
@ -413,8 +452,8 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
|
|||
if (req_sz % func_blk_sz != 0) {
|
||||
brcmf_err("sg request length %u is not %u aligned\n",
|
||||
req_sz, func_blk_sz);
|
||||
sg_free_table(&st);
|
||||
return -ENOTBLK;
|
||||
ret = -ENOTBLK;
|
||||
goto exit;
|
||||
}
|
||||
mmc_dat.sg = st.sgl;
|
||||
mmc_dat.sg_len = sg_cnt;
|
||||
|
@ -447,35 +486,36 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
|
|||
}
|
||||
}
|
||||
|
||||
sg_free_table(&st);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
|
||||
uint flags, uint width, u32 *addr)
|
||||
{
|
||||
uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
|
||||
int err = 0;
|
||||
|
||||
/* Async not implemented yet */
|
||||
if (flags & SDIO_REQ_ASYNC)
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (bar0 != sdiodev->sbwad) {
|
||||
err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
sdiodev->sbwad = bar0;
|
||||
if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
|
||||
local_pkt_next = local_list.next;
|
||||
orig_offset = 0;
|
||||
skb_queue_walk(pktlist, pkt_next) {
|
||||
dst_offset = 0;
|
||||
do {
|
||||
req_sz = local_pkt_next->len - orig_offset;
|
||||
req_sz = min_t(uint, pkt_next->len - dst_offset,
|
||||
req_sz);
|
||||
orig_data = local_pkt_next->data + orig_offset;
|
||||
dst_data = pkt_next->data + dst_offset;
|
||||
memcpy(dst_data, orig_data, req_sz);
|
||||
orig_offset += req_sz;
|
||||
dst_offset += req_sz;
|
||||
if (orig_offset == local_pkt_next->len) {
|
||||
orig_offset = 0;
|
||||
local_pkt_next = local_pkt_next->next;
|
||||
}
|
||||
if (dst_offset == pkt_next->len)
|
||||
break;
|
||||
} while (!skb_queue_empty(&local_list));
|
||||
}
|
||||
}
|
||||
|
||||
*addr &= SBSDIO_SB_OFT_ADDR_MASK;
|
||||
exit:
|
||||
sg_free_table(&st);
|
||||
while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
|
||||
brcmu_pkt_buf_free_skb(pkt_next);
|
||||
|
||||
if (width == 4)
|
||||
*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -512,7 +552,7 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
|
|||
fn, addr, pkt->len);
|
||||
|
||||
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
|
||||
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
|
||||
err = brcmf_sdio_addrprep(sdiodev, width, &addr);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
|
@ -536,7 +576,7 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
|
|||
fn, addr, pktq->qlen);
|
||||
|
||||
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
|
||||
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
|
||||
err = brcmf_sdio_addrprep(sdiodev, width, &addr);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
|
@ -574,37 +614,20 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
|
|||
uint flags, struct sk_buff *pkt)
|
||||
{
|
||||
uint width;
|
||||
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
|
||||
int err = 0;
|
||||
struct sk_buff_head pkt_list;
|
||||
|
||||
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
|
||||
fn, addr, pkt->len);
|
||||
|
||||
/* Async not implemented yet */
|
||||
if (flags & SDIO_REQ_ASYNC)
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (bar0 != sdiodev->sbwad) {
|
||||
err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
sdiodev->sbwad = bar0;
|
||||
}
|
||||
|
||||
addr &= SBSDIO_SB_OFT_ADDR_MASK;
|
||||
|
||||
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
|
||||
if (width == 4)
|
||||
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
|
||||
brcmf_sdio_addrprep(sdiodev, width, &addr);
|
||||
|
||||
skb_queue_head_init(&pkt_list);
|
||||
skb_queue_tail(&pkt_list, pkt);
|
||||
err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
|
||||
skb_dequeue_tail(&pkt_list);
|
||||
|
||||
done:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -281,8 +281,6 @@ void brcmf_txflowblock(struct device *dev, bool state)
|
|||
|
||||
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
|
||||
{
|
||||
unsigned char *eth;
|
||||
uint len;
|
||||
struct sk_buff *skb, *pnext;
|
||||
struct brcmf_if *ifp;
|
||||
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
|
||||
|
@ -306,33 +304,12 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
|
|||
continue;
|
||||
}
|
||||
|
||||
/* Get the protocol, maintain skb around eth_type_trans()
|
||||
* The main reason for this hack is for the limitation of
|
||||
* Linux 2.4 where 'eth_type_trans' uses the
|
||||
* 'net->hard_header_len'
|
||||
* to perform skb_pull inside vs ETH_HLEN. Since to avoid
|
||||
* coping of the packet coming from the network stack to add
|
||||
* BDC, Hardware header etc, during network interface
|
||||
* registration
|
||||
* we set the 'net->hard_header_len' to ETH_HLEN + extra space
|
||||
* required
|
||||
* for BDC, Hardware header etc. and not just the ETH_HLEN
|
||||
*/
|
||||
eth = skb->data;
|
||||
len = skb->len;
|
||||
|
||||
skb->dev = ifp->ndev;
|
||||
skb->protocol = eth_type_trans(skb, skb->dev);
|
||||
|
||||
if (skb->pkt_type == PACKET_MULTICAST)
|
||||
ifp->stats.multicast++;
|
||||
|
||||
skb->data = eth;
|
||||
skb->len = len;
|
||||
|
||||
/* Strip header, count, deliver upward */
|
||||
skb_pull(skb, ETH_HLEN);
|
||||
|
||||
/* Process special event packets */
|
||||
brcmf_fweh_process_skb(drvr, skb);
|
||||
|
||||
|
@ -348,10 +325,8 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
|
|||
netif_rx(skb);
|
||||
else
|
||||
/* If the receive is not processed inside an ISR,
|
||||
* the softirqd must be woken explicitly to service
|
||||
* the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
|
||||
* by netif_rx_ni(), but in earlier kernels, we need
|
||||
* to do it manually.
|
||||
* the softirqd must be woken explicitly to service the
|
||||
* NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
|
||||
*/
|
||||
netif_rx_ni(skb);
|
||||
}
|
||||
|
|
|
@ -796,9 +796,8 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
|
|||
u8 fillers;
|
||||
__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
|
||||
|
||||
brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u (%u), pkttag=0x%08X, hslot=%d\n",
|
||||
entry->ea, entry->interface_id,
|
||||
brcmf_skb_if_flags_get_field(skb, INDEX),
|
||||
brcmf_dbg(TRACE, "enter: %s, idx=%d pkttag=0x%08X, hslot=%d\n",
|
||||
entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
|
||||
le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
|
||||
if (entry->send_tim_signal)
|
||||
data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
|
||||
|
@ -822,8 +821,8 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
|
|||
wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
|
||||
wlh[2] = entry->mac_handle;
|
||||
wlh[3] = entry->traffic_pending_bmp;
|
||||
brcmf_dbg(TRACE, "adding TIM info: %02X:%02X:%02X:%02X\n",
|
||||
wlh[0], wlh[1], wlh[2], wlh[3]);
|
||||
brcmf_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
|
||||
entry->mac_handle, entry->traffic_pending_bmp);
|
||||
wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
|
||||
entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
|
||||
}
|
||||
|
@@ -906,10 +905,26 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
	return 0;
}

/* using macro so sparse checking does not complain
 * about locking imbalance.
 */
#define brcmf_fws_lock(drvr, flags)				\
do {								\
	flags = 0;						\
	spin_lock_irqsave(&((drvr)->fws_spinlock), (flags));	\
} while (0)

/* using macro so sparse checking does not complain
 * about locking imbalance.
 */
#define brcmf_fws_unlock(drvr, flags) \
	spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))

static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry, *existing;
	ulong flags;
	u8 mac_handle;
	u8 ifidx;
	u8 *addr;

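These wrappers stay macros because sparse flags an irqsave lock whose matching irqrestore lives in a different function as a context imbalance; expanding them at the call site keeps acquire and release visibly paired. A usage sketch of the call-site shape only, not a new call added by this patch:

/* Sketch: flags is zeroed inside brcmf_fws_lock() before the irqsave,
 * which also avoids used-uninitialized warnings at call sites. */
static void example_locked_update(struct brcmf_pub *drvr)
{
	ulong flags;

	brcmf_fws_lock(drvr, flags);
	/* ... modify state protected by drvr->fws_spinlock ... */
	brcmf_fws_unlock(drvr, flags);
}
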
@@ -923,8 +938,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
	if (entry->occupied) {
		brcmf_dbg(TRACE, "deleting %s mac %pM\n",
			  entry->name, addr);
		brcmf_fws_lock(fws->drvr, flags);
		brcmf_fws_macdesc_cleanup(fws, entry, -1);
		brcmf_fws_macdesc_deinit(entry);
		brcmf_fws_unlock(fws->drvr, flags);
	} else
		fws->stats.mac_update_failed++;
	return 0;

@ -933,11 +950,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
|
|||
existing = brcmf_fws_macdesc_lookup(fws, addr);
|
||||
if (IS_ERR(existing)) {
|
||||
if (!entry->occupied) {
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
entry->mac_handle = mac_handle;
|
||||
brcmf_fws_macdesc_init(entry, addr, ifidx);
|
||||
brcmf_fws_macdesc_set_name(fws, entry);
|
||||
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
|
||||
BRCMF_FWS_PSQ_LEN);
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
|
||||
} else {
|
||||
fws->stats.mac_update_failed++;
|
||||
|
@ -945,11 +964,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
|
|||
} else {
|
||||
if (entry != existing) {
|
||||
brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
memcpy(entry, existing,
|
||||
offsetof(struct brcmf_fws_mac_descriptor, psq));
|
||||
entry->mac_handle = mac_handle;
|
||||
brcmf_fws_macdesc_deinit(existing);
|
||||
brcmf_fws_macdesc_set_name(fws, entry);
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
|
||||
addr);
|
||||
} else {
|
||||
|
@ -965,7 +986,9 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
|
|||
u8 type, u8 *data)
|
||||
{
|
||||
struct brcmf_fws_mac_descriptor *entry;
|
||||
ulong flags;
|
||||
u8 mac_handle;
|
||||
int ret;
|
||||
|
||||
mac_handle = data[0];
|
||||
entry = &fws->desc.nodes[mac_handle & 0x1F];
|
||||
|
@ -973,26 +996,30 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
|
|||
fws->stats.mac_ps_update_failed++;
|
||||
return -ESRCH;
|
||||
}
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
/* a state update should wipe old credits */
|
||||
entry->requested_credit = 0;
|
||||
entry->requested_packet = 0;
|
||||
if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
|
||||
entry->state = BRCMF_FWS_STATE_OPEN;
|
||||
return BRCMF_FWS_RET_OK_SCHEDULE;
|
||||
ret = BRCMF_FWS_RET_OK_SCHEDULE;
|
||||
} else {
|
||||
entry->state = BRCMF_FWS_STATE_CLOSE;
|
||||
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
|
||||
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
|
||||
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
|
||||
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
|
||||
ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
|
||||
}
|
||||
return BRCMF_FWS_RET_OK_NOSCHEDULE;
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
|
||||
u8 type, u8 *data)
|
||||
{
|
||||
struct brcmf_fws_mac_descriptor *entry;
|
||||
ulong flags;
|
||||
u8 ifidx;
|
||||
int ret;
|
||||
|
||||
|
@ -1011,17 +1038,24 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
|
|||
|
||||
brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
|
||||
entry->name);
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
switch (type) {
|
||||
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
|
||||
entry->state = BRCMF_FWS_STATE_OPEN;
|
||||
return BRCMF_FWS_RET_OK_SCHEDULE;
|
||||
ret = BRCMF_FWS_RET_OK_SCHEDULE;
|
||||
break;
|
||||
case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
|
||||
entry->state = BRCMF_FWS_STATE_CLOSE;
|
||||
return BRCMF_FWS_RET_OK_NOSCHEDULE;
|
||||
ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
goto fail;
|
||||
}
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
return ret;
|
||||
|
||||
fail:
|
||||
fws->stats.if_update_failed++;
|
||||
return ret;
|
||||
|
@ -1031,6 +1065,7 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
|
|||
u8 *data)
|
||||
{
|
||||
struct brcmf_fws_mac_descriptor *entry;
|
||||
ulong flags;
|
||||
|
||||
entry = &fws->desc.nodes[data[1] & 0x1F];
|
||||
if (!entry->occupied) {
|
||||
|
@ -1044,12 +1079,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
|
|||
brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
|
||||
brcmf_fws_get_tlv_name(type), type, entry->name,
|
||||
data[0], data[2]);
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
|
||||
entry->requested_credit = data[0];
|
||||
else
|
||||
entry->requested_packet = data[0];
|
||||
|
||||
entry->ac_bitmap = data[2];
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
return BRCMF_FWS_RET_OK_SCHEDULE;
|
||||
}
|
||||
|
||||
|
@ -1346,6 +1383,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
|
|||
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
|
||||
u8 *data)
|
||||
{
|
||||
ulong flags;
|
||||
int i;
|
||||
|
||||
if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
|
||||
|
@ -1354,16 +1392,19 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
|
|||
}
|
||||
|
||||
brcmf_dbg(DATA, "enter: data %pM\n", data);
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
|
||||
brcmf_fws_return_credits(fws, i, data[i]);
|
||||
|
||||
brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
|
||||
fws->fifo_delay_map);
|
||||
brcmf_fws_unlock(fws->drvr, flags);
|
||||
return BRCMF_FWS_RET_OK_SCHEDULE;
|
||||
}
|
||||
|
||||
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
|
||||
{
|
||||
ulong lflags;
|
||||
__le32 status_le;
|
||||
u32 status;
|
||||
u32 hslot;
|
||||
|
@ -1377,7 +1418,10 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
|
|||
hslot = brcmf_txstatus_get_field(status, HSLOT);
|
||||
genbit = brcmf_txstatus_get_field(status, GENERATION);
|
||||
|
||||
return brcmf_fws_txs_process(fws, flags, hslot, genbit);
|
||||
brcmf_fws_lock(fws->drvr, lflags);
|
||||
brcmf_fws_txs_process(fws, flags, hslot, genbit);
|
||||
brcmf_fws_unlock(fws->drvr, lflags);
|
||||
return BRCMF_FWS_RET_OK_NOSCHEDULE;
|
||||
}
|
||||
|
||||
static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
|
||||
|
@@ -1390,21 +1434,6 @@ static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
	return 0;
}

/* using macro so sparse checking does not complain
 * about locking imbalance.
 */
#define brcmf_fws_lock(drvr, flags)				\
do {								\
	flags = 0;						\
	spin_lock_irqsave(&((drvr)->fws_spinlock), (flags));	\
} while (0)

/* using macro so sparse checking does not complain
 * about locking imbalance.
 */
#define brcmf_fws_unlock(drvr, flags) \
	spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))

static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
				       const struct brcmf_event_msg *e,
				       void *data)

@ -1455,7 +1484,6 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
|
|||
struct sk_buff *skb)
|
||||
{
|
||||
struct brcmf_fws_info *fws = drvr->fws;
|
||||
ulong flags;
|
||||
u8 *signal_data;
|
||||
s16 data_len;
|
||||
u8 type;
|
||||
|
@ -1475,9 +1503,6 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* lock during tlv parsing */
|
||||
brcmf_fws_lock(drvr, flags);
|
||||
|
||||
fws->stats.header_pulls++;
|
||||
data_len = signal_len;
|
||||
signal_data = skb->data;
|
||||
|
@ -1571,25 +1596,17 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
|
|||
if (skb->len == 0)
|
||||
fws->stats.header_only_pkt++;
|
||||
|
||||
brcmf_fws_unlock(drvr, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
|
||||
static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
|
||||
struct sk_buff *p)
|
||||
{
|
||||
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
|
||||
struct brcmf_fws_mac_descriptor *entry = skcb->mac;
|
||||
int rc = 0;
|
||||
bool first_time;
|
||||
int hslot = BRCMF_FWS_HANGER_MAXITEMS;
|
||||
u8 free_ctr;
|
||||
u8 flags;
|
||||
|
||||
first_time = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;
|
||||
|
||||
brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
|
||||
brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
|
||||
brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
|
||||
flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
|
||||
if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
|
||||
|
@ -1600,80 +1617,36 @@ static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
|
|||
flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
|
||||
}
|
||||
brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
|
||||
if (first_time) {
|
||||
/* obtaining free slot may fail, but that will be caught
|
||||
* by the hanger push. This assures the packet has a BDC
|
||||
* header upon return.
|
||||
*/
|
||||
hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
|
||||
free_ctr = entry->seq[fifo];
|
||||
brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
|
||||
brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr);
|
||||
rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
|
||||
if (rc)
|
||||
brcmf_err("hanger push failed: rc=%d\n", rc);
|
||||
}
|
||||
|
||||
if (rc == 0)
|
||||
brcmf_fws_hdrpush(fws, p);
|
||||
|
||||
return rc;
|
||||
brcmf_fws_hdrpush(fws, p);
|
||||
}
|
||||
|
||||
static void
|
||||
brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
|
||||
struct sk_buff *skb, int fifo)
|
||||
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
|
||||
struct sk_buff *skb, int fifo)
|
||||
{
|
||||
/*
|
||||
put the packet back to the head of queue
|
||||
|
||||
- suppressed packet goes back to suppress sub-queue
|
||||
- pull out the header, if new or delayed packet
|
||||
|
||||
Note: hslot is used only when header removal is done.
|
||||
*/
|
||||
struct brcmf_fws_mac_descriptor *entry;
|
||||
enum brcmf_fws_skb_state state;
|
||||
struct sk_buff *pktout;
|
||||
int qidx, hslot;
|
||||
int rc = 0;
|
||||
int hslot;
|
||||
|
||||
state = brcmf_skbcb(skb)->state;
|
||||
entry = brcmf_skbcb(skb)->mac;
|
||||
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
|
||||
if (entry->occupied) {
|
||||
qidx = 2 * fifo;
|
||||
if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
|
||||
qidx++;
|
||||
|
||||
if (entry != NULL) {
|
||||
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
|
||||
/* wl-header is saved for suppressed packets */
|
||||
pktout = brcmu_pktq_penq_head(&entry->psq, 2 * fifo + 1,
|
||||
skb);
|
||||
if (pktout == NULL) {
|
||||
brcmf_err("suppress queue full\n");
|
||||
rc = -ENOSPC;
|
||||
}
|
||||
} else {
|
||||
/* delay-q packets are going to delay-q */
|
||||
pktout = brcmu_pktq_penq_head(&entry->psq,
|
||||
2 * fifo, skb);
|
||||
if (pktout == NULL) {
|
||||
brcmf_err("delay queue full\n");
|
||||
rc = -ENOSPC;
|
||||
}
|
||||
|
||||
/* free the hanger slot */
|
||||
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &pktout,
|
||||
true);
|
||||
|
||||
/* decrement sequence count */
|
||||
entry->seq[fifo]--;
|
||||
pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
|
||||
if (pktout == NULL) {
|
||||
brcmf_err("%s queue %d full\n", entry->name, qidx);
|
||||
rc = -ENOSPC;
|
||||
}
|
||||
} else {
|
||||
brcmf_err("no mac entry linked\n");
|
||||
brcmf_err("%s entry removed\n", entry->name);
|
||||
rc = -ENOENT;
|
||||
}
|
||||
|
||||
if (rc) {
|
||||
fws->stats.rollback_failed++;
|
||||
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
|
||||
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
|
||||
hslot, 0);
|
||||
} else {
|
||||
|
@ -1707,37 +1680,6 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
|
|||
return -ENAVAIL;
|
||||
}
|
||||
|
||||
static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
|
||||
int *credit = &fws->fifo_credit[fifo];
|
||||
|
||||
if (fifo != BRCMF_FWS_FIFO_AC_BE)
|
||||
fws->borrow_defer_timestamp = jiffies +
|
||||
BRCMF_FWS_BORROW_DEFER_PERIOD;
|
||||
|
||||
if (!(*credit)) {
|
||||
/* Try to borrow a credit from other queue */
|
||||
if (fifo != BRCMF_FWS_FIFO_AC_BE ||
|
||||
(brcmf_fws_borrow_credit(fws) != 0)) {
|
||||
brcmf_dbg(DATA, "ac=%d, credits depleted\n", fifo);
|
||||
return -ENAVAIL;
|
||||
}
|
||||
} else {
|
||||
(*credit)--;
|
||||
if (!(*credit))
|
||||
fws->fifo_credit_map &= ~(1 << fifo);
|
||||
}
|
||||
|
||||
brcmf_fws_macdesc_use_req_credit(entry, skb);
|
||||
|
||||
brcmf_dbg(DATA, "ac=%d, credits=%02d:%02d:%02d:%02d\n", fifo,
|
||||
fws->fifo_credit[0], fws->fifo_credit[1],
|
||||
fws->fifo_credit[2], fws->fifo_credit[3]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
|
@ -1751,15 +1693,10 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
|
|||
if (IS_ERR(entry))
|
||||
return PTR_ERR(entry);
|
||||
|
||||
rc = brcmf_fws_precommit_skb(fws, fifo, skb);
|
||||
if (rc < 0) {
|
||||
fws->stats.generic_error++;
|
||||
goto rollback;
|
||||
}
|
||||
|
||||
brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
|
||||
skcb->htod);
|
||||
brcmf_fws_precommit_skb(fws, fifo, skb);
|
||||
rc = brcmf_bus_txdata(bus, skb);
|
||||
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
|
||||
skcb->if_flags, skcb->htod, rc);
|
||||
if (rc < 0) {
|
||||
brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
|
||||
goto rollback;
|
||||
|
@ -1768,7 +1705,6 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
|
|||
entry->transit_count++;
|
||||
if (entry->suppressed)
|
||||
entry->suppr_transit_count++;
|
||||
entry->seq[fifo]++;
|
||||
fws->stats.pkt2bus++;
|
||||
fws->stats.send_pkts[fifo]++;
|
||||
if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
|
||||
|
@ -1781,6 +1717,24 @@ rollback:
|
|||
return rc;
|
||||
}
|
||||
|
||||
static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
|
||||
int fifo)
|
||||
{
|
||||
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
|
||||
int rc, hslot;
|
||||
|
||||
hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
|
||||
brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
|
||||
brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
|
||||
brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
|
||||
rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
|
||||
if (!rc)
|
||||
skcb->mac->seq[fifo]++;
|
||||
else
|
||||
fws->stats.generic_error++;
|
||||
return rc;
|
||||
}
|
||||
|
||||
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
|
||||
{
|
||||
struct brcmf_pub *drvr = ifp->drvr;
|
||||
|
@ -1809,33 +1763,25 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
|
|||
|
||||
/* set control buffer information */
|
||||
skcb->if_flags = 0;
|
||||
skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
|
||||
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
|
||||
brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
|
||||
if (!multicast)
|
||||
fifo = brcmf_fws_prio2fifo[skb->priority];
|
||||
|
||||
brcmf_fws_lock(drvr, flags);
|
||||
if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
|
||||
fws->borrow_defer_timestamp = jiffies +
|
||||
BRCMF_FWS_BORROW_DEFER_PERIOD;
|
||||
|
||||
skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
|
||||
brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
|
||||
eh->h_dest, multicast, fifo);
|
||||
|
||||
brcmf_fws_lock(drvr, flags);
|
||||
/* multicast credit support is conditional, setting
|
||||
* flag to false to assure credit is consumed below.
|
||||
*/
|
||||
if (fws->bcmc_credit_check)
|
||||
multicast = false;
|
||||
|
||||
if (skcb->mac->suppressed ||
|
||||
fws->bus_flow_blocked ||
|
||||
brcmf_fws_macdesc_closed(fws, skcb->mac, fifo) ||
|
||||
brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) ||
|
||||
(!multicast &&
|
||||
brcmf_fws_consume_credit(fws, fifo, skb) < 0)) {
|
||||
/* enqueue the packet in delayQ */
|
||||
drvr->fws->fifo_delay_map |= 1 << fifo;
|
||||
if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
|
||||
brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
|
||||
brcmf_fws_schedule_deq(fws);
|
||||
} else {
|
||||
brcmf_fws_commit_skb(fws, fifo, skb);
|
||||
brcmf_err("drop skb: no hanger slot\n");
|
||||
brcmu_pkt_buf_free_skb(skb);
|
||||
}
|
||||
brcmf_fws_unlock(drvr, flags);
|
||||
return 0;
|
||||
|
@ -1895,7 +1841,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
|
|||
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
|
||||
|
||||
brcmf_fws_lock(fws->drvr, flags);
|
||||
for (fifo = NL80211_NUM_ACS; fifo >= 0 && !fws->bus_flow_blocked;
|
||||
for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
|
||||
fifo--) {
|
||||
while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
|
||||
(fifo == BRCMF_FWS_FIFO_BCMC))) {
|
||||
|
|
|
@ -229,8 +229,6 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
|
|||
#define SDIO_REQ_4BYTE 0x1
|
||||
/* Fixed address (FIFO) (vs. incrementing address) */
|
||||
#define SDIO_REQ_FIXED 0x2
|
||||
/* Async request (vs. sync request) */
|
||||
#define SDIO_REQ_ASYNC 0x4
|
||||
|
||||
/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
|
||||
* rw: read or write (0/1)
|
||||
|
@ -251,9 +249,6 @@ extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
|
|||
extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
|
||||
extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
|
||||
|
||||
extern int brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev,
|
||||
u32 address);
|
||||
|
||||
/* attach, return handler on success, NULL if failed.
|
||||
* The handler shall be provided by all subsequent calls. No local cache
|
||||
* cfghdl points to the starting address of pci device mapped memory
|
||||
|
|
|
@ -267,7 +267,7 @@ struct cw1200_common {
|
|||
struct delayed_work bss_loss_work;
|
||||
spinlock_t bss_loss_lock; /* Protect BSS loss state */
|
||||
int bss_loss_state;
|
||||
int bss_loss_confirm_id;
|
||||
u32 bss_loss_confirm_id;
|
||||
int delayed_link_loss;
|
||||
struct work_struct bss_params_work;
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
|
|||
void *dst, int count)
|
||||
{
|
||||
int ret, i;
|
||||
uint16_t regaddr;
|
||||
u16 regaddr;
|
||||
struct spi_message m;
|
||||
|
||||
struct spi_transfer t_addr = {
|
||||
|
@ -76,15 +76,18 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
|
|||
regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
|
||||
regaddr |= SET_READ;
|
||||
regaddr |= (count>>1);
|
||||
regaddr = cpu_to_le16(regaddr);
|
||||
|
||||
#ifdef SPI_DEBUG
|
||||
pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr,
|
||||
le16_to_cpu(regaddr));
|
||||
pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, regaddr);
|
||||
#endif
|
||||
|
||||
/* Header is LE16 */
|
||||
regaddr = cpu_to_le16(regaddr);
|
||||
|
||||
/* We have to byteswap if the SPI bus is limited to 8b operation
|
||||
or we are running on a Big Endian system
|
||||
*/
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
/* We have to byteswap if the SPI bus is limited to 8b operation */
|
||||
if (self->func->bits_per_word == 8)
|
||||
#endif
|
||||
regaddr = swab16(regaddr);
|
||||
|
@ -104,8 +107,10 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
|
|||
printk("\n");
|
||||
#endif
|
||||
|
||||
/* We have to byteswap if the SPI bus is limited to 8b operation
|
||||
or we are running on a Big Endian system
|
||||
*/
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
/* We have to byteswap if the SPI bus is limited to 8b operation */
|
||||
if (self->func->bits_per_word == 8)
|
||||
#endif
|
||||
{
|
||||
|
@ -122,7 +127,7 @@ static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
|
|||
const void *src, int count)
|
||||
{
|
||||
int rval, i;
|
||||
uint16_t regaddr;
|
||||
u16 regaddr;
|
||||
struct spi_transfer t_addr = {
|
||||
.tx_buf = ®addr,
|
||||
.len = sizeof(regaddr),
|
||||
|
@ -136,20 +141,23 @@ static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
|
|||
regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
|
||||
regaddr &= SET_WRITE;
|
||||
regaddr |= (count>>1);
|
||||
regaddr = cpu_to_le16(regaddr);
|
||||
|
||||
#ifdef SPI_DEBUG
|
||||
pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr,
|
||||
le16_to_cpu(regaddr));
|
||||
pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, regaddr);
|
||||
#endif
|
||||
|
||||
/* Header is LE16 */
|
||||
regaddr = cpu_to_le16(regaddr);
|
||||
|
||||
/* We have to byteswap if the SPI bus is limited to 8b operation
|
||||
or we are running on a Big Endian system
|
||||
*/
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
/* We have to byteswap if the SPI bus is limited to 8b operation */
|
||||
if (self->func->bits_per_word == 8)
|
||||
#endif
|
||||
{
|
||||
uint16_t *buf = (uint16_t *)src;
|
||||
regaddr = swab16(regaddr);
|
||||
regaddr = swab16(regaddr);
|
||||
for (i = 0; i < ((count + 1) >> 1); i++)
|
||||
buf[i] = swab16(buf[i]);
|
||||
}
|
||||
|
|
|
@@ -69,31 +69,33 @@ static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
				       u16 addr, u32 *val)
{
	int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0);
	*val = le32_to_cpu(*val);
	__le32 tmp;
	int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
	*val = le32_to_cpu(tmp);
	return i;
}

static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
					u16 addr, u32 val)
{
	val = cpu_to_le32(val);
	return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0);
	__le32 tmp = cpu_to_le32(val);
	return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
}

static inline int __cw1200_reg_read_16(struct cw1200_common *priv,
				       u16 addr, u16 *val)
{
	int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0);
	*val = le16_to_cpu(*val);
	__le16 tmp;
	int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
	*val = le16_to_cpu(tmp);
	return i;
}

static inline int __cw1200_reg_write_16(struct cw1200_common *priv,
					u16 addr, u16 val)
{
	val = cpu_to_le16(val);
	return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0);
	__le16 tmp = cpu_to_le16(val);
	return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
}

int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,

@@ -169,35 +169,34 @@ int cw1200_reg_write(struct cw1200_common *priv, u16 addr,
static inline int cw1200_reg_read_16(struct cw1200_common *priv,
				     u16 addr, u16 *val)
{
	u32 tmp;
	__le32 tmp;
	int i;
	i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
	tmp = le32_to_cpu(tmp);
	*val = tmp & 0xffff;
	*val = le32_to_cpu(tmp) & 0xffff;
	return i;
}

static inline int cw1200_reg_write_16(struct cw1200_common *priv,
				      u16 addr, u16 val)
{
	u32 tmp = val;
	tmp = cpu_to_le32(tmp);
	__le32 tmp = cpu_to_le32((u32)val);
	return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp));
}

static inline int cw1200_reg_read_32(struct cw1200_common *priv,
				     u16 addr, u32 *val)
{
	int i = cw1200_reg_read(priv, addr, val, sizeof(*val));
	*val = le32_to_cpu(*val);
	__le32 tmp;
	int i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
	*val = le32_to_cpu(tmp);
	return i;
}

static inline int cw1200_reg_write_32(struct cw1200_common *priv,
				      u16 addr, u32 val)
{
	val = cpu_to_le32(val);
	return cw1200_reg_write(priv, addr, &val, sizeof(val));
	__le32 tmp = cpu_to_le32(val);
	return cw1200_reg_write(priv, addr, &tmp, sizeof(val));
}

int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,

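The two hunks above follow one pattern: route the raw bus value through a __le16/__le32 temporary and convert exactly once, so sparse endian checking can verify the conversion. A generic sketch of the same pattern, with illustrative names rather than anything taken from this patch:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: keep the wire-endian value in a bitwise-typed temporary. */
static inline u32 example_read_le32(const void *io_buf)
{
	__le32 raw;

	memcpy(&raw, io_buf, sizeof(raw));	/* raw stays little-endian */
	return le32_to_cpu(raw);		/* single, checked conversion */
}
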
@ -224,22 +223,24 @@ static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr,
|
|||
static inline int cw1200_apb_read_32(struct cw1200_common *priv,
|
||||
u32 addr, u32 *val)
|
||||
{
|
||||
int i = cw1200_apb_read(priv, addr, val, sizeof(*val));
|
||||
*val = le32_to_cpu(*val);
|
||||
__le32 tmp;
|
||||
int i = cw1200_apb_read(priv, addr, &tmp, sizeof(tmp));
|
||||
*val = le32_to_cpu(tmp);
|
||||
return i;
|
||||
}
|
||||
|
||||
static inline int cw1200_apb_write_32(struct cw1200_common *priv,
|
||||
u32 addr, u32 val)
|
||||
{
|
||||
val = cpu_to_le32(val);
|
||||
return cw1200_apb_write(priv, addr, &val, sizeof(val));
|
||||
__le32 tmp = cpu_to_le32(val);
|
||||
return cw1200_apb_write(priv, addr, &tmp, sizeof(val));
|
||||
}
|
||||
static inline int cw1200_ahb_read_32(struct cw1200_common *priv,
|
||||
u32 addr, u32 *val)
|
||||
{
|
||||
int i = cw1200_ahb_read(priv, addr, val, sizeof(*val));
|
||||
*val = le32_to_cpu(*val);
|
||||
__le32 tmp;
|
||||
int i = cw1200_ahb_read(priv, addr, &tmp, sizeof(tmp));
|
||||
*val = le32_to_cpu(tmp);
|
||||
return i;
|
||||
}
|
||||
|
||||
|
|
|
@ -238,8 +238,8 @@ static const struct ieee80211_ops cw1200_ops = {
|
|||
/*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */
|
||||
};
|
||||
|
||||
int cw1200_ba_rx_tids = -1;
|
||||
int cw1200_ba_tx_tids = -1;
|
||||
static int cw1200_ba_rx_tids = -1;
|
||||
static int cw1200_ba_tx_tids = -1;
|
||||
module_param(cw1200_ba_rx_tids, int, 0644);
|
||||
module_param(cw1200_ba_tx_tids, int, 0644);
|
||||
MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs");
|
||||
|
|
|
@ -355,7 +355,7 @@ int cw1200_queue_get(struct cw1200_queue *queue,
|
|||
*tx = (struct wsm_tx *)item->skb->data;
|
||||
*tx_info = IEEE80211_SKB_CB(item->skb);
|
||||
*txpriv = &item->txpriv;
|
||||
(*tx)->packet_id = __cpu_to_le32(item->packet_id);
|
||||
(*tx)->packet_id = item->packet_id;
|
||||
list_move_tail(&item->head, &queue->pending);
|
||||
++queue->num_pending;
|
||||
--queue->link_map_cache[item->txpriv.link_id];
|
||||
|
|
|
@ -621,7 +621,7 @@ int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
|
|||
mutex_lock(&priv->conf_mutex);
|
||||
|
||||
if (queue < dev->queues) {
|
||||
old_uapsd_flags = priv->uapsd_info.uapsd_flags;
|
||||
old_uapsd_flags = le16_to_cpu(priv->uapsd_info.uapsd_flags);
|
||||
|
||||
WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
|
||||
ret = wsm_set_tx_queue_params(priv,
|
||||
|
@ -645,7 +645,7 @@ int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
|
|||
ret = cw1200_set_uapsd_param(priv, &priv->edca);
|
||||
if (!ret && priv->setbssparams_done &&
|
||||
(priv->join_status == CW1200_JOIN_STATUS_STA) &&
|
||||
(old_uapsd_flags != priv->uapsd_info.uapsd_flags))
|
||||
(old_uapsd_flags != le16_to_cpu(priv->uapsd_info.uapsd_flags)))
|
||||
ret = cw1200_set_pm(priv, &priv->powersave_mode);
|
||||
}
|
||||
} else {
|
||||
|
@ -1089,18 +1089,18 @@ static int cw1200_parse_sdd_file(struct cw1200_common *priv)
|
|||
ret = -1;
|
||||
break;
|
||||
}
|
||||
v = le16_to_cpu(*((u16 *)(p + 2)));
|
||||
v = le16_to_cpu(*((__le16 *)(p + 2)));
|
||||
if (!v) /* non-zero means this is enabled */
|
||||
break;
|
||||
|
||||
v = le16_to_cpu(*((u16 *)(p + 4)));
|
||||
v = le16_to_cpu(*((__le16 *)(p + 4)));
|
||||
priv->conf_listen_interval = (v >> 7) & 0x1F;
|
||||
pr_debug("PTA found; Listen Interval %d\n",
|
||||
priv->conf_listen_interval);
|
||||
break;
|
||||
}
|
||||
case SDD_REFERENCE_FREQUENCY_ELT_ID: {
|
||||
u16 clk = le16_to_cpu(*((u16 *)(p + 2)));
|
||||
u16 clk = le16_to_cpu(*((__le16 *)(p + 2)));
|
||||
if (clk != priv->hw_refclk)
|
||||
pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n",
|
||||
clk, priv->hw_refclk);
|
||||
|
@ -1785,9 +1785,9 @@ static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
|
|||
} else {
|
||||
pr_debug("[STA] STA has non ERP rates\n");
|
||||
/* B only mode */
|
||||
arg.internalTxRate = (__ffs(priv->association_mode.basic_rate_set));
|
||||
arg.internalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
|
||||
}
|
||||
arg.nonErpInternalTxRate = (__ffs(priv->association_mode.basic_rate_set));
|
||||
arg.nonErpInternalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
|
||||
} else {
|
||||
/* P2P mode */
|
||||
arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
|
||||
|
@ -1908,7 +1908,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
|
|||
|
||||
if (info->assoc || info->ibss_joined) {
|
||||
struct ieee80211_sta *sta = NULL;
|
||||
u32 val = 0;
|
||||
__le32 htprot = 0;
|
||||
|
||||
if (info->dtim_period)
|
||||
priv->join_dtim_period = info->dtim_period;
|
||||
|
@ -1935,19 +1935,18 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
|
|||
/* Non Greenfield stations present */
|
||||
if (priv->ht_info.operation_mode &
|
||||
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
|
||||
val |= WSM_NON_GREENFIELD_STA_PRESENT;
|
||||
htprot |= cpu_to_le32(WSM_NON_GREENFIELD_STA_PRESENT);
|
||||
|
||||
/* Set HT protection method */
|
||||
val |= (priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2;
|
||||
htprot |= cpu_to_le32((priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2);
|
||||
|
||||
/* TODO:
|
||||
* STBC_param.dual_cts
|
||||
* STBC_param.LSIG_TXOP_FILL
|
||||
*/
|
||||
|
||||
val = cpu_to_le32(val);
|
||||
wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION,
|
||||
&val, sizeof(val));
|
||||
&htprot, sizeof(htprot));
|
||||
|
||||
priv->association_mode.greenfield =
|
||||
cw1200_ht_greenfield(&priv->ht_info);
|
||||
|
|
|
@ -599,15 +599,15 @@ cw1200_tx_h_bt(struct cw1200_common *priv,
|
|||
} else if (ieee80211_is_data(t->hdr->frame_control)) {
|
||||
/* Skip LLC SNAP header (+6) */
|
||||
u8 *payload = &t->skb->data[t->hdrlen];
|
||||
u16 *ethertype = (u16 *)&payload[6];
|
||||
if (*ethertype == __be16_to_cpu(ETH_P_PAE))
|
||||
__be16 *ethertype = (__be16 *)&payload[6];
|
||||
if (be16_to_cpu(*ethertype) == ETH_P_PAE)
|
||||
priority = WSM_EPTA_PRIORITY_EAPOL;
|
||||
} else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
|
||||
ieee80211_is_reassoc_req(t->hdr->frame_control)) {
|
||||
struct ieee80211_mgmt *mgt_frame =
|
||||
(struct ieee80211_mgmt *)t->hdr;
|
||||
|
||||
if (mgt_frame->u.assoc_req.listen_interval <
|
||||
if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
|
||||
priv->listen_interval) {
|
||||
pr_debug("Modified Listen Interval to %d from %d\n",
|
||||
priv->listen_interval,
|
||||
|
@ -615,8 +615,7 @@ cw1200_tx_h_bt(struct cw1200_common *priv,
|
|||
/* Replace listen interval derived from
|
||||
* the one read from SDD
|
||||
*/
|
||||
mgt_frame->u.assoc_req.listen_interval =
|
||||
priv->listen_interval;
|
||||
mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -42,19 +42,19 @@
	(buf)->data += size; \
} while (0)

#define __WSM_GET(buf, type, cvt) \
#define __WSM_GET(buf, type, type2, cvt) \
	({ \
		type val; \
		if ((buf)->data + sizeof(type) > (buf)->end) \
			goto underflow; \
		val = cvt(*(type *)(buf)->data); \
		val = cvt(*(type2 *)(buf)->data); \
		(buf)->data += sizeof(type); \
		val; \
	})

#define WSM_GET8(buf)  __WSM_GET(buf, u8, (u8))
#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16_to_cpu)
#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32_to_cpu)
#define WSM_GET8(buf)  __WSM_GET(buf, u8, u8, (u8))
#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16, __le16_to_cpu)
#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32, __le32_to_cpu)

#define WSM_PUT(buf, ptr, size) \
	do { \

@@ -65,18 +65,18 @@
	(buf)->data += size; \
} while (0)

#define __WSM_PUT(buf, val, type, cvt) \
#define __WSM_PUT(buf, val, type, type2, cvt) \
	do { \
		if ((buf)->data + sizeof(type) > (buf)->end) \
		if (wsm_buf_reserve((buf), sizeof(type))) \
			goto nomem; \
		*(type *)(buf)->data = cvt(val); \
		*(type2 *)(buf)->data = cvt(val); \
		(buf)->data += sizeof(type); \
	} while (0)

#define WSM_PUT8(buf, val)  __WSM_PUT(buf, val, u8, (u8))
#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __cpu_to_le16)
#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __cpu_to_le32)
#define WSM_PUT8(buf, val)  __WSM_PUT(buf, val, u8, u8, (u8))
#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __le16, __cpu_to_le16)
#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __le32, __cpu_to_le32)

static void wsm_buf_reset(struct wsm_buf *buf);
static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size);

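The extra macro argument above separates the CPU-side type, used for sizing and for the local variable, from the bitwise wire type used in the cast; that is what lets WSM_GET16/WSM_GET32 pass endian checking. Roughly, WSM_GET16(buf) now expands to the equivalent of the sketch below (simplified: the real macro jumps to an underflow label on short buffers):

/* Sketch: simplified expansion of WSM_GET16() after this change. */
static u16 example_wsm_get16(struct wsm_buf *buf)
{
	u16 val = __le16_to_cpu(*(__le16 *)buf->data);	/* cast via wire type */

	buf->data += sizeof(u16);	/* advance by the CPU-type size */
	return val;
}
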
@ -931,8 +931,8 @@ static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf)
|
|||
if (!event)
|
||||
return -ENOMEM;
|
||||
|
||||
event->evt.id = __le32_to_cpu(WSM_GET32(buf));
|
||||
event->evt.data = __le32_to_cpu(WSM_GET32(buf));
|
||||
event->evt.id = WSM_GET32(buf);
|
||||
event->evt.data = WSM_GET32(buf);
|
||||
|
||||
pr_debug("[WSM] Event: %d(%d)\n",
|
||||
event->evt.id, event->evt.data);
|
||||
|
@ -1311,7 +1311,7 @@ int wsm_handle_rx(struct cw1200_common *priv, u16 id,
|
|||
|
||||
wsm_buf.begin = (u8 *)&wsm[0];
|
||||
wsm_buf.data = (u8 *)&wsm[1];
|
||||
wsm_buf.end = &wsm_buf.begin[__le32_to_cpu(wsm->len)];
|
||||
wsm_buf.end = &wsm_buf.begin[__le16_to_cpu(wsm->len)];
|
||||
|
||||
pr_debug("[WSM] <<< 0x%.4X (%td)\n", id,
|
||||
wsm_buf.end - wsm_buf.begin);
|
||||
|
@ -1550,7 +1550,7 @@ static bool wsm_handle_tx_data(struct cw1200_common *priv,
|
|||
*/
|
||||
pr_debug("[WSM] Convert probe request to scan.\n");
|
||||
wsm_lock_tx_async(priv);
|
||||
priv->pending_frame_id = __le32_to_cpu(wsm->packet_id);
|
||||
priv->pending_frame_id = wsm->packet_id;
|
||||
if (queue_delayed_work(priv->workqueue,
|
||||
&priv->scan.probe_work, 0) <= 0)
|
||||
wsm_unlock_tx(priv);
|
||||
|
@ -1558,15 +1558,14 @@ static bool wsm_handle_tx_data(struct cw1200_common *priv,
|
|||
break;
|
||||
case do_drop:
|
||||
pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl);
|
||||
BUG_ON(cw1200_queue_remove(queue,
|
||||
__le32_to_cpu(wsm->packet_id)));
|
||||
BUG_ON(cw1200_queue_remove(queue, wsm->packet_id));
|
||||
handled = true;
|
||||
break;
|
||||
case do_wep:
|
||||
pr_debug("[WSM] Issue set_default_wep_key.\n");
|
||||
wsm_lock_tx_async(priv);
|
||||
priv->wep_default_key_id = tx_info->control.hw_key->keyidx;
|
||||
priv->pending_frame_id = __le32_to_cpu(wsm->packet_id);
|
||||
priv->pending_frame_id = wsm->packet_id;
|
||||
if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0)
|
||||
wsm_unlock_tx(priv);
|
||||
handled = true;
|
||||
|
|
|
@ -806,7 +806,7 @@ struct wsm_tx {
|
|||
struct wsm_hdr hdr;
|
||||
|
||||
/* Packet identifier that meant to be used in completion. */
|
||||
__le32 packet_id;
|
||||
u32 packet_id; /* Note this is actually a cookie */
|
||||
|
||||
/* WSM_TRANSMIT_RATE_... */
|
||||
u8 max_tx_rate;
|
||||
|
@ -825,18 +825,18 @@ struct wsm_tx {
|
|||
u8 flags;
|
||||
|
||||
/* Should be 0. */
|
||||
__le32 reserved;
|
||||
u32 reserved;
|
||||
|
||||
/* The elapsed time in TUs, after the initial transmission */
|
||||
/* of an MSDU, after which further attempts to transmit */
|
||||
/* the MSDU shall be terminated. Overrides the global */
|
||||
/* dot11MaxTransmitMsduLifeTime setting [optional] */
|
||||
/* Device will set the default value if this is 0. */
|
||||
__le32 expire_time;
|
||||
u32 expire_time;
|
||||
|
||||
/* WSM_HT_TX_... */
|
||||
__le32 ht_tx_parameters;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */
|
||||
#define WSM_TX_EXTRA_HEADROOM (28)
|
||||
|
@ -846,10 +846,10 @@ struct wsm_tx {
|
|||
|
||||
struct wsm_rx {
|
||||
/* WSM_STATUS_... */
|
||||
__le32 status;
|
||||
u32 status;
|
||||
|
||||
/* Specifies the channel of the received packet. */
|
||||
__le16 channel_number;
|
||||
u16 channel_number;
|
||||
|
||||
/* WSM_TRANSMIT_RATE_... */
|
||||
u8 rx_rate;
|
||||
|
@ -859,11 +859,8 @@ struct wsm_rx {
|
|||
u8 rcpi_rssi;
|
||||
|
||||
/* WSM_RX_STATUS_... */
|
||||
__le32 flags;
|
||||
|
||||
/* Payload */
|
||||
u8 data[0];
|
||||
} __packed;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */
|
||||
#define WSM_RX_EXTRA_HEADROOM (16)
|
||||
|
@ -1119,22 +1116,22 @@ int wsm_set_tx_queue_params(struct cw1200_common *priv,
|
|||
#define WSM_EDCA_PARAMS_RESP_ID 0x0413
|
||||
struct wsm_edca_queue_params {
|
||||
/* CWmin (in slots) for the access class. */
|
||||
__le16 cwmin;
|
||||
u16 cwmin;
|
||||
|
||||
/* CWmax (in slots) for the access class. */
|
||||
__le16 cwmax;
|
||||
u16 cwmax;
|
||||
|
||||
/* AIFS (in slots) for the access class. */
|
||||
__le16 aifns;
|
||||
u16 aifns;
|
||||
|
||||
/* TX OP Limit (in microseconds) for the access class. */
|
||||
__le16 txop_limit;
|
||||
u16 txop_limit;
|
||||
|
||||
/* dot11MaxReceiveLifetime to be used for the specified */
|
||||
/* the access class. Overrides the global */
|
||||
/* dot11MaxReceiveLifetime value */
|
||||
__le32 max_rx_lifetime;
|
||||
} __packed;
|
||||
u32 max_rx_lifetime;
|
||||
};
|
||||
|
||||
struct wsm_edca_params {
|
||||
/* NOTE: index is a linux queue id. */
|
||||
|
@ -1147,12 +1144,12 @@ struct wsm_edca_params {
|
|||
__uapsd) \
|
||||
do { \
|
||||
struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \
|
||||
p->cwmin = (__cw_min); \
|
||||
p->cwmax = (__cw_max); \
|
||||
p->aifns = (__aifs); \
|
||||
p->txop_limit = ((__txop) * TXOP_UNIT); \
|
||||
p->max_rx_lifetime = (__lifetime); \
|
||||
(__edca)->uapsd_enable[__queue] = (__uapsd); \
|
||||
p->cwmin = __cw_min; \
|
||||
p->cwmax = __cw_max; \
|
||||
p->aifns = __aifs; \
|
||||
p->txop_limit = ((__txop) * TXOP_UNIT); \
|
||||
p->max_rx_lifetime = __lifetime; \
|
||||
(__edca)->uapsd_enable[__queue] = (__uapsd); \
|
||||
} while (0)
|
||||
|
||||
int wsm_set_edca_params(struct cw1200_common *priv,
|
||||
|
@ -1475,7 +1472,7 @@ static inline int wsm_set_template_frame(struct cw1200_common *priv,
|
|||
u8 *p = skb_push(arg->skb, 4);
|
||||
p[0] = arg->frame_type;
|
||||
p[1] = arg->rate;
|
||||
((u16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
|
||||
((__le16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
|
||||
ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
|
||||
skb_pull(arg->skb, 4);
|
||||
return ret;
|
||||
|
|
|
@ -3548,6 +3548,7 @@ static int ipw_load(struct ipw_priv *priv)
|
|||
ipw_rx_queue_reset(priv, priv->rxq);
|
||||
if (!priv->rxq) {
|
||||
IPW_ERROR("Unable to initialize Rx queue\n");
|
||||
rc = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
|
|
|
@ -1195,7 +1195,7 @@ static int libipw_parse_info_param(struct libipw_info_element
|
|||
#ifdef CONFIG_LIBIPW_DEBUG
|
||||
p += snprintf(p, sizeof(rates_str) -
|
||||
(p - rates_str), "%02X ",
|
||||
network->rates[i]);
|
||||
network->rates_ex[i]);
|
||||
#endif
|
||||
if (libipw_is_ofdm_rate
|
||||
(info_element->data[i])) {
|
||||
|
|
|
@ -128,16 +128,6 @@ config IWLWIFI_DEVICE_TRACING
|
|||
occur.
|
||||
endmenu
|
||||
|
||||
config IWLWIFI_DEVICE_TESTMODE
|
||||
def_bool y
|
||||
depends on IWLWIFI
|
||||
depends on NL80211_TESTMODE
|
||||
help
|
||||
This option enables the testmode support for iwlwifi device through
|
||||
NL80211_TESTMODE. This provide the capabilities of enable user space
|
||||
validation applications to interacts with the device through the
|
||||
generic netlink message via NL80211_TESTMODE channel.
|
||||
|
||||
config IWLWIFI_P2P
|
||||
def_bool y
|
||||
bool "iwlwifi experimental P2P support"
|
||||
|
|
|
@ -13,7 +13,6 @@ iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
|
|||
iwlwifi-objs += $(iwlwifi-m)
|
||||
|
||||
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
|
||||
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
|
||||
|
||||
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
|
||||
|
||||
|
|
|
@ -8,6 +8,5 @@ iwldvm-objs += scan.o led.o
|
|||
iwldvm-objs += rxon.o devices.o
|
||||
|
||||
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
|
||||
iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
|
||||
|
||||
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
|
||||
|
|
|
@ -405,43 +405,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
|
|||
|
||||
extern int iwl_alive_start(struct iwl_priv *priv);
|
||||
|
||||
/* testmode support */
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
|
||||
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
|
||||
int len);
|
||||
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
|
||||
struct sk_buff *skb,
|
||||
struct netlink_callback *cb,
|
||||
void *data, int len);
|
||||
extern void iwl_testmode_init(struct iwl_priv *priv);
|
||||
extern void iwl_testmode_free(struct iwl_priv *priv);
|
||||
|
||||
#else
|
||||
|
||||
static inline
|
||||
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline
|
||||
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
|
||||
struct netlink_callback *cb,
|
||||
void *data, int len)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline void iwl_testmode_init(struct iwl_priv *priv)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void iwl_testmode_free(struct iwl_priv *priv)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
|
||||
enum iwl_rxon_context_id ctxid);
|
||||
|
|
|
@ -52,8 +52,6 @@
|
|||
#include "rs.h"
|
||||
#include "tt.h"
|
||||
|
||||
#include "iwl-test.h"
|
||||
|
||||
/* CT-KILL constants */
|
||||
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
|
||||
#define CT_KILL_THRESHOLD 114 /* in Celsius */
|
||||
|
@ -691,10 +689,6 @@ struct iwl_priv {
|
|||
struct iwl_spectrum_notification measure_report;
|
||||
u8 measurement_status;
|
||||
|
||||
#define IWL_OWNERSHIP_DRIVER 0
|
||||
#define IWL_OWNERSHIP_TM 1
|
||||
u8 ucode_owner;
|
||||
|
||||
/* ucode beacon time */
|
||||
u32 ucode_beacon_time;
|
||||
int missed_beacon_threshold;
|
||||
|
@ -889,7 +883,7 @@ struct iwl_priv {
|
|||
#endif /* CONFIG_IWLWIFI_DEBUGFS */
|
||||
|
||||
struct iwl_nvm_data *nvm_data;
|
||||
/* eeprom blob for debugfs/testmode */
|
||||
/* eeprom blob for debugfs */
|
||||
u8 *eeprom_blob;
|
||||
size_t eeprom_blob_size;
|
||||
|
||||
|
@ -905,11 +899,6 @@ struct iwl_priv {
|
|||
unsigned long blink_on, blink_off;
|
||||
bool led_registered;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
struct iwl_test tst;
|
||||
u32 tm_fixed_rate;
|
||||
#endif
|
||||
|
||||
/* WoWLAN GTK rekey data */
|
||||
u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
|
||||
__le64 replay_ctr;
|
||||
|
|
|
@ -1288,12 +1288,6 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
if (!(cmd->flags & CMD_ASYNC))
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
|
||||
!(cmd->flags & CMD_ON_DEMAND)) {
|
||||
IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return iwl_trans_send_cmd(priv->trans, cmd);
|
||||
}
|
||||
|
||||
|
|
|
@ -1766,8 +1766,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
|
|||
.remain_on_channel = iwlagn_mac_remain_on_channel,
|
||||
.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
|
||||
.rssi_callback = iwlagn_mac_rssi_callback,
|
||||
CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
|
||||
CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
|
||||
.set_tim = iwlagn_mac_set_tim,
|
||||
};
|
||||
|
||||
|
|
|
@ -1105,8 +1105,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
|
|||
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
|
||||
priv->agg_tids_count = 0;
|
||||
|
||||
priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
|
||||
|
||||
priv->rx_statistics_jiffies = jiffies;
|
||||
|
||||
/* Choose which receivers/antennas to use */
|
||||
|
@ -1172,12 +1170,6 @@ static void iwl_option_config(struct iwl_priv *priv)
|
|||
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
|
||||
#else
|
||||
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_P2P
|
||||
IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
|
||||
#else
|
||||
|
@ -1355,8 +1347,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|||
IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
|
||||
true : false;
|
||||
|
||||
/* enable/disable bt channel inhibition */
|
||||
priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
|
||||
/* bt channel inhibition enabled*/
|
||||
priv->bt_ch_announce = true;
|
||||
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
|
||||
(priv->bt_ch_announce) ? "On" : "Off");
|
||||
|
||||
|
@ -1451,7 +1443,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|||
********************/
|
||||
iwl_setup_deferred_work(priv);
|
||||
iwl_setup_rx_handlers(priv);
|
||||
iwl_testmode_init(priv);
|
||||
|
||||
iwl_power_initialize(priv);
|
||||
iwl_tt_initialize(priv);
|
||||
|
@ -1488,7 +1479,6 @@ out_mac80211_unregister:
|
|||
iwlagn_mac_unregister(priv);
|
||||
out_destroy_workqueue:
|
||||
iwl_tt_exit(priv);
|
||||
iwl_testmode_free(priv);
|
||||
iwl_cancel_deferred_work(priv);
|
||||
destroy_workqueue(priv->workqueue);
|
||||
priv->workqueue = NULL;
|
||||
|
@ -1510,7 +1500,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
|
|||
|
||||
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
|
||||
|
||||
iwl_testmode_free(priv);
|
||||
iwlagn_mac_unregister(priv);
|
||||
|
||||
iwl_tt_exit(priv);
|
||||
|
|
|
@ -351,12 +351,6 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
|
|||
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
/* testmode has higher priority to overwrite the fixed rate */
|
||||
if (priv->tm_fixed_rate)
|
||||
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
|
||||
#endif
|
||||
|
||||
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
|
||||
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
|
||||
|
||||
|
@ -419,23 +413,18 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
|
|||
|
||||
load = rs_tl_get_load(lq_data, tid);
|
||||
|
||||
if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
|
||||
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
|
||||
sta->addr, tid);
|
||||
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
|
||||
if (ret == -EAGAIN) {
|
||||
/*
|
||||
* driver and mac80211 is out of sync
|
||||
 * this might be caused by reloading firmware
|
||||
* stop the tx ba session here
|
||||
*/
|
||||
IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
|
||||
tid);
|
||||
ieee80211_stop_tx_ba_session(sta, tid);
|
||||
}
|
||||
} else {
|
||||
IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
|
||||
"because load = %u\n", tid, load);
|
||||
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
|
||||
sta->addr, tid);
|
||||
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
|
||||
if (ret == -EAGAIN) {
|
||||
/*
|
||||
* driver and mac80211 is out of sync
|
||||
 * this might be caused by reloading firmware
|
||||
* stop the tx ba session here
|
||||
*/
|
||||
IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
|
||||
tid);
|
||||
ieee80211_stop_tx_ba_session(sta, tid);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -1083,11 +1072,6 @@ done:
|
|||
if (sta && sta->supp_rates[sband->band])
|
||||
rs_rate_scale_perform(priv, skb, sta, lq_sta);
|
||||
|
||||
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
|
||||
if ((priv->tm_fixed_rate) &&
|
||||
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
|
||||
rs_program_fix_rate(priv, lq_sta);
|
||||
#endif
|
||||
if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
|
||||
rs_bt_update_lq(priv, ctx, lq_sta);
|
||||
}
|
||||
|
@ -2913,9 +2897,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
|
|||
if (sband->band == IEEE80211_BAND_5GHZ)
|
||||
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
|
||||
lq_sta->is_agg = 0;
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
priv->tm_fixed_rate = 0;
|
||||
#endif
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
#endif
|
||||
|
|
|
@ -335,8 +335,7 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
|
|||
if (msecs < 99)
|
||||
return;
|
||||
|
||||
if (iwlwifi_mod_params.plcp_check &&
|
||||
!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
|
||||
if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
|
||||
iwl_force_rf_reset(priv, false);
|
||||
}
|
||||
|
||||
|
@ -1120,32 +1119,17 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
|
|||
*/
|
||||
iwl_notification_wait_notify(&priv->notif_wait, pkt);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
/*
|
||||
* RX data may be forwarded to userspace in one
|
||||
* of two cases: the user owns the fw through testmode or when
|
||||
* the user requested to monitor the rx w/o affecting the regular flow.
|
||||
* In these cases the iwl_test object will handle forwarding the rx
|
||||
* data to user space.
|
||||
* Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
|
||||
* continues.
|
||||
*/
|
||||
iwl_test_rx(&priv->tst, rxb);
|
||||
#endif
|
||||
|
||||
if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
|
||||
/* Based on type of command response or notification,
|
||||
* handle those that need handling via function in
|
||||
* rx_handlers table. See iwl_setup_rx_handlers() */
|
||||
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
||||
priv->rx_handlers_stats[pkt->hdr.cmd]++;
|
||||
err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
|
||||
} else {
|
||||
/* No handling needed */
|
||||
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
|
||||
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
|
||||
pkt->hdr.cmd);
|
||||
}
|
||||
/* Based on type of command response or notification,
|
||||
* handle those that need handling via function in
|
||||
* rx_handlers table. See iwl_setup_rx_handlers() */
|
||||
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
||||
priv->rx_handlers_stats[pkt->hdr.cmd]++;
|
||||
err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
|
||||
} else {
|
||||
/* No handling needed */
|
||||
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
|
||||
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
|
||||
pkt->hdr.cmd);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -1,471 +0,0 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <net/cfg80211.h>
|
||||
#include <net/mac80211.h>
|
||||
#include <net/netlink.h>
|
||||
|
||||
#include "iwl-debug.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "dev.h"
|
||||
#include "agn.h"
|
||||
#include "iwl-test.h"
|
||||
#include "iwl-testmode.h"
|
||||
|
||||
static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
|
||||
struct iwl_host_cmd *cmd)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
|
||||
return iwl_dvm_send_cmd(priv, cmd);
|
||||
}
|
||||
|
||||
static bool iwl_testmode_valid_hw_addr(u32 addr)
|
||||
{
|
||||
if (iwlagn_hw_valid_rtc_data_addr(addr))
|
||||
return true;
|
||||
|
||||
if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
|
||||
addr < IWLAGN_RTC_INST_UPPER_BOUND)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
|
||||
return priv->fw->ucode_ver;
|
||||
}
|
||||
|
||||
static struct sk_buff*
|
||||
iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
|
||||
return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
|
||||
}
|
||||
|
||||
static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
|
||||
{
|
||||
return cfg80211_testmode_reply(skb);
|
||||
}
|
||||
|
||||
static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
|
||||
int len)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
|
||||
return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
|
||||
GFP_ATOMIC);
|
||||
}
|
||||
|
||||
static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
|
||||
{
|
||||
return cfg80211_testmode_event(skb, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
static struct iwl_test_ops tst_ops = {
|
||||
.send_cmd = iwl_testmode_send_cmd,
|
||||
.valid_hw_addr = iwl_testmode_valid_hw_addr,
|
||||
.get_fw_ver = iwl_testmode_get_fw_ver,
|
||||
.alloc_reply = iwl_testmode_alloc_reply,
|
||||
.reply = iwl_testmode_reply,
|
||||
.alloc_event = iwl_testmode_alloc_event,
|
||||
.event = iwl_testmode_event,
|
||||
};
|
||||
|
||||
void iwl_testmode_init(struct iwl_priv *priv)
|
||||
{
|
||||
iwl_test_init(&priv->tst, priv->trans, &tst_ops);
|
||||
}
|
||||
|
||||
void iwl_testmode_free(struct iwl_priv *priv)
|
||||
{
|
||||
iwl_test_free(&priv->tst);
|
||||
}
|
||||
|
||||
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_notification_wait calib_wait;
|
||||
static const u8 calib_complete[] = {
|
||||
CALIBRATION_COMPLETE_NOTIFICATION
|
||||
};
|
||||
int ret;
|
||||
|
||||
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
|
||||
calib_complete, ARRAY_SIZE(calib_complete),
|
||||
NULL, NULL);
|
||||
ret = iwl_init_alive_start(priv);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Fail init calibration: %d\n", ret);
|
||||
goto cfg_init_calib_error;
|
||||
}
|
||||
|
||||
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error detecting"
|
||||
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
|
||||
return ret;
|
||||
|
||||
cfg_init_calib_error:
|
||||
iwl_remove_notification(&priv->notif_wait, &calib_wait);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * This function handles the user application commands for the driver.
 *
 * It retrieves the command ID carried with IWL_TM_ATTR_COMMAND and calls
 * the respective handler.
 *
 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the return
 * value of the actual command execution is replied to the user application.
 *
 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
 * is used to carry the message while IWL_TM_ATTR_COMMAND must be set to
 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
|
||||
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
|
||||
struct iwl_trans *trans = priv->trans;
|
||||
struct sk_buff *skb;
|
||||
unsigned char *rsp_data_ptr = NULL;
|
||||
int status = 0, rsp_data_len = 0;
|
||||
u32 inst_size = 0, data_size = 0;
|
||||
const struct fw_img *img;
|
||||
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
|
||||
rsp_data_ptr = (unsigned char *)priv->cfg->name;
|
||||
rsp_data_len = strlen(priv->cfg->name);
|
||||
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
|
||||
rsp_data_len + 20);
|
||||
if (!skb) {
|
||||
IWL_ERR(priv, "Memory allocation fail\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
|
||||
IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
|
||||
nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
|
||||
rsp_data_len, rsp_data_ptr))
|
||||
goto nla_put_failure;
|
||||
status = cfg80211_testmode_reply(skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(priv, "Error sending msg : %d\n", status);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
|
||||
status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
|
||||
if (status)
|
||||
IWL_ERR(priv, "Error loading init ucode: %d\n", status);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
|
||||
iwl_testmode_cfg_init_calib(priv);
|
||||
priv->ucode_loaded = false;
|
||||
iwl_trans_stop_device(trans);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
|
||||
status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
|
||||
if (status) {
|
||||
IWL_ERR(priv,
|
||||
"Error loading runtime ucode: %d\n", status);
|
||||
break;
|
||||
}
|
||||
status = iwl_alive_start(priv);
|
||||
if (status)
|
||||
IWL_ERR(priv,
|
||||
"Error starting the device: %d\n", status);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
|
||||
iwl_scan_cancel_timeout(priv, 200);
|
||||
priv->ucode_loaded = false;
|
||||
iwl_trans_stop_device(trans);
|
||||
status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
|
||||
if (status) {
|
||||
IWL_ERR(priv,
|
||||
"Error loading WOWLAN ucode: %d\n", status);
|
||||
break;
|
||||
}
|
||||
status = iwl_alive_start(priv);
|
||||
if (status)
|
||||
IWL_ERR(priv,
|
||||
"Error starting the device: %d\n", status);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
|
||||
if (priv->eeprom_blob) {
|
||||
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
|
||||
priv->eeprom_blob_size + 20);
|
||||
if (!skb) {
|
||||
IWL_ERR(priv, "Memory allocation fail\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
|
||||
IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
|
||||
nla_put(skb, IWL_TM_ATTR_EEPROM,
|
||||
priv->eeprom_blob_size,
|
||||
priv->eeprom_blob))
|
||||
goto nla_put_failure;
|
||||
status = cfg80211_testmode_reply(skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(priv, "Error sending msg : %d\n",
|
||||
status);
|
||||
} else
|
||||
return -ENODATA;
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
|
||||
if (!tb[IWL_TM_ATTR_FIXRATE]) {
|
||||
IWL_ERR(priv, "Missing fixrate setting\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
|
||||
break;
|
||||
|
||||
	case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
		if (!priv->ucode_loaded) {
			IWL_ERR(priv, "No uCode has been loaded\n");
			return -EINVAL;
		}
		img = &priv->fw->img[priv->cur_ucode];
		inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
		data_size = img->sec[IWL_UCODE_SECTION_DATA].len;

		/* Allocate the reply only once the uCode state is known,
		 * so a failed check does not leak the skb. */
		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
		if (!skb) {
			IWL_ERR(priv, "Memory allocation fail\n");
			return -ENOMEM;
		}
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
|
||||
nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
|
||||
nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
|
||||
goto nla_put_failure;
|
||||
status = cfg80211_testmode_reply(skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(priv, "Error sending msg : %d\n", status);
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_ERR(priv, "Unknown testmode driver command ID\n");
|
||||
return -ENOSYS;
|
||||
}
|
||||
return status;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/*
 * This function handles the user application's request to switch uCode
 * ownership.
 *
 * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and decides
 * who the current owner of the uCode is.
 *
 * If the current owner is OWNERSHIP_TM, then the only host commands that
 * can be delivered to the uCode are those from testmode; all other host
 * commands will be dropped.
 *
 * By default the driver is the owner of the uCode in normal operational
 * mode.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
|
||||
static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
|
||||
u8 owner;
|
||||
|
||||
if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
|
||||
IWL_ERR(priv, "Missing ucode owner\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
|
||||
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
|
||||
if (owner == IWL_OWNERSHIP_DRIVER) {
|
||||
priv->ucode_owner = owner;
|
||||
iwl_test_enable_notifications(&priv->tst, false);
|
||||
} else if (owner == IWL_OWNERSHIP_TM) {
|
||||
priv->ucode_owner = owner;
|
||||
iwl_test_enable_notifications(&priv->tst, true);
|
||||
} else {
|
||||
IWL_ERR(priv, "Invalid owner\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The testmode gnl message handler that takes the gnl message from the
 * user space, parses it per the policy iwl_testmode_gnl_msg_policy, and then
 * invokes the corresponding handlers.
 *
 * This function is invoked when a user space application sends a gnl message
 * through the testmode tunnel NL80211_CMD_TESTMODE regulated by nl80211.
 *
 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
 * dispatching it to the corresponding handler.
 *
 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to the user
 * application; -ENOSYS is replied if the command is unknown; otherwise,
 * the command is dispatched to the respective handler.
 * A minimal user-space sketch of building such a message follows this
 * function.
 *
 * @hw: ieee80211_hw object that represents the device
 * @data: pointer to user space message
 * @len: length in bytes of @data
 */
|
||||
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
|
||||
{
|
||||
struct nlattr *tb[IWL_TM_ATTR_MAX];
|
||||
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
|
||||
int result;
|
||||
|
||||
result = iwl_test_parse(&priv->tst, tb, data, len);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
/* in case multiple accesses to the device happens */
|
||||
mutex_lock(&priv->mutex);
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_UCODE:
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
|
||||
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
|
||||
case IWL_TM_CMD_APP2DEV_END_TRACE:
|
||||
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
|
||||
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
|
||||
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
|
||||
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
|
||||
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
|
||||
result = iwl_test_handle_cmd(&priv->tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
|
||||
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
|
||||
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
|
||||
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
|
||||
case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
|
||||
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
|
||||
result = iwl_testmode_driver(hw, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_OWNERSHIP:
|
||||
IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
|
||||
result = iwl_testmode_ownership(hw, tb);
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_ERR(priv, "Unknown testmode command\n");
|
||||
result = -ENOSYS;
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&priv->mutex);
|
||||
|
||||
if (result)
|
||||
IWL_ERR(priv, "Test cmd failed result=%d\n", result);
|
||||
return result;
|
||||
}
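For reference, this is a minimal user-space sketch (not part of this commit) of how such a gnl message might be built with libnl-genl-3: the iwl testmode TLVs are nested inside NL80211_ATTR_TESTDATA of an NL80211_CMD_TESTMODE request, here asking for the uCode version. The interface name "wlan0" and the local copy of iwl-testmode.h are assumptions.

/* Hypothetical example, assuming libnl-genl-3 and a user-space copy of
 * iwl-testmode.h providing the IWL_TM_* definitions. */
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include "iwl-testmode.h"

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;
	int family;

	if (!sk || !msg || genl_connect(sk))
		return 1;

	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		return 1;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));

	/* The iwl TLVs travel nested inside NL80211_ATTR_TESTDATA */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND,
		    IWL_TM_CMD_APP2DEV_GET_FW_VERSION);
	nla_nest_end(msg, td);

	if (nl_send_auto(sk, msg) < 0)
		return 1;
	/* The synchronous reply carrying IWL_TM_ATTR_FW_VERSION would be
	 * collected here with an NL_CB_VALID callback and
	 * nl_recvmsgs_default(sk); omitted for brevity. */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}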
|
||||
|
||||
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
|
||||
struct netlink_callback *cb,
|
||||
void *data, int len)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
|
||||
int result;
|
||||
u32 cmd;
|
||||
|
||||
if (cb->args[3]) {
|
||||
/* offset by 1 since commands start at 0 */
|
||||
cmd = cb->args[3] - 1;
|
||||
} else {
|
||||
struct nlattr *tb[IWL_TM_ATTR_MAX];
|
||||
|
||||
result = iwl_test_parse(&priv->tst, tb, data, len);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
|
||||
cb->args[3] = cmd + 1;
|
||||
}
|
||||
|
||||
/* in case multiple accesses to the device happens */
|
||||
mutex_lock(&priv->mutex);
|
||||
result = iwl_test_dump(&priv->tst, cmd, skb, cb);
|
||||
mutex_unlock(&priv->mutex);
|
||||
return result;
|
||||
}
|
|
@@ -162,18 +162,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
|
|||
if (ieee80211_is_data(fc)) {
|
||||
tx_cmd->initial_rate_index = 0;
|
||||
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
|
||||
if (priv->tm_fixed_rate) {
|
||||
/*
|
||||
* rate overwrite by testmode
|
||||
* we not only send lq command to change rate
|
||||
* we also re-enforce per data pkt base.
|
||||
*/
|
||||
tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
|
||||
memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
|
||||
sizeof(tx_cmd->rate_n_flags));
|
||||
}
|
||||
#endif
|
||||
return;
|
||||
} else if (ieee80211_is_back_req(fc))
|
||||
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
|
||||
|
|
|
@@ -67,16 +67,16 @@
|
|||
#include "iwl-agn-hw.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL7260_UCODE_API_MAX 6
|
||||
#define IWL3160_UCODE_API_MAX 6
|
||||
#define IWL7260_UCODE_API_MAX 7
|
||||
#define IWL3160_UCODE_API_MAX 7
|
||||
|
||||
/* Oldest version we won't warn about */
|
||||
#define IWL7260_UCODE_API_OK 6
|
||||
#define IWL3160_UCODE_API_OK 6
|
||||
#define IWL7260_UCODE_API_OK 7
|
||||
#define IWL3160_UCODE_API_OK 7
|
||||
|
||||
/* Lowest firmware API version supported */
|
||||
#define IWL7260_UCODE_API_MIN 6
|
||||
#define IWL3160_UCODE_API_MIN 6
|
||||
#define IWL7260_UCODE_API_MIN 7
|
||||
#define IWL3160_UCODE_API_MIN 7
|
||||
|
||||
/* NVM versions */
|
||||
#define IWL7260_NVM_VERSION 0x0a1d
|
||||
|
|
|
@@ -222,6 +222,7 @@ struct iwl_cfg {
|
|||
const u32 max_inst_size;
|
||||
u8 valid_tx_ant;
|
||||
u8 valid_rx_ant;
|
||||
bool bt_shared_single_ant;
|
||||
u16 nvm_ver;
|
||||
u16 nvm_calib_ver;
|
||||
/* params not likely to change within a device family */
|
||||
|
|
|
@@ -1111,11 +1111,8 @@ void iwl_drv_stop(struct iwl_drv *drv)
|
|||
/* shared module parameters */
|
||||
struct iwl_mod_params iwlwifi_mod_params = {
|
||||
.restart_fw = true,
|
||||
.plcp_check = true,
|
||||
.bt_coex_active = true,
|
||||
.power_level = IWL_POWER_INDEX_1,
|
||||
.bt_ch_announce = true,
|
||||
.auto_agg = true,
|
||||
.wd_disable = true,
|
||||
/* the rest are 0 by default */
|
||||
};
|
||||
|
@@ -1223,14 +1220,6 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
|
|||
MODULE_PARM_DESC(antenna_coupling,
|
||||
"specify antenna coupling in dB (defualt: 0 dB)");
|
||||
|
||||
module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
|
||||
bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(bt_ch_inhibition,
|
||||
"Enable BT channel inhibition (default: enable)");
|
||||
|
||||
module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
|
||||
|
||||
module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(wd_disable,
|
||||
"Disable stuck queue watchdog timer 0=system default, "
|
||||
|
@@ -1272,8 +1261,3 @@ module_param_named(power_level, iwlwifi_mod_params.power_level,
|
|||
int, S_IRUGO);
|
||||
MODULE_PARM_DESC(power_level,
|
||||
"default power save level (range from 1 - 5, default: 1)");
|
||||
|
||||
module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
|
||||
bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(auto_agg,
|
||||
"enable agg w/o check traffic load (default: enable)");
|
||||
|
|
|
@@ -93,7 +93,6 @@ enum iwl_power_level {
|
|||
* use IWL_DISABLE_HT_* constants
|
||||
* @amsdu_size_8K: enable 8K amsdu size, default = 0
|
||||
* @restart_fw: restart firmware, default = 1
|
||||
* @plcp_check: enable plcp health check, default = true
|
||||
* @wd_disable: enable stuck queue check, default = 0
|
||||
* @bt_coex_active: enable bt coex, default = true
|
||||
* @led_mode: system default, default = 0
|
||||
|
@@ -101,15 +100,12 @@ enum iwl_power_level {
|
|||
* @power_level: power level, default = 1
|
||||
* @debug_level: levels are IWL_DL_*
|
||||
* @ant_coupling: antenna coupling in dB, default = 0
|
||||
* @bt_ch_announce: BT channel inhibition, default = enable
|
||||
* @auto_agg: enable agg. without check, default = true
|
||||
*/
|
||||
struct iwl_mod_params {
|
||||
int sw_crypto;
|
||||
unsigned int disable_11n;
|
||||
int amsdu_size_8K;
|
||||
bool restart_fw;
|
||||
bool plcp_check;
|
||||
int wd_disable;
|
||||
bool bt_coex_active;
|
||||
int led_mode;
|
||||
|
@@ -119,8 +115,6 @@ struct iwl_mod_params {
|
|||
u32 debug_level;
|
||||
#endif
|
||||
int ant_coupling;
|
||||
bool bt_ch_announce;
|
||||
bool auto_agg;
|
||||
char *nvm_file;
|
||||
};
|
||||
|
||||
|
|
|
@@ -1,852 +0,0 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <net/netlink.h>
|
||||
|
||||
#include "iwl-drv.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-fh.h"
|
||||
#include "iwl-prph.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-test.h"
|
||||
#include "iwl-csr.h"
|
||||
#include "iwl-testmode.h"
|
||||
|
||||
/*
|
||||
* Periphery registers absolute lower bound. This is used in order to
|
||||
 * differentiate register access through HBUS_TARG_PRPH_* and
|
||||
* HBUS_TARG_MEM_* accesses.
|
||||
*/
|
||||
#define IWL_ABS_PRPH_START (0xA00000)
|
||||
|
||||
/*
|
||||
* The TLVs used in the gnl message policy between the kernel module and
|
||||
* user space application. iwl_testmode_gnl_msg_policy is to be carried
|
||||
* through the NL80211_CMD_TESTMODE channel regulated by nl80211.
|
||||
* See iwl-testmode.h
|
||||
*/
|
||||
static
|
||||
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
|
||||
[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
|
||||
|
||||
[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
|
||||
[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
|
||||
[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
|
||||
|
||||
[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
|
||||
[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
|
||||
[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
|
||||
[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
|
||||
|
||||
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
|
||||
|
||||
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
|
||||
|
||||
[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
|
||||
[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
|
||||
|
||||
[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
|
||||
};
|
||||
|
||||
static inline void iwl_test_trace_clear(struct iwl_test *tst)
|
||||
{
|
||||
memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
|
||||
}
|
||||
|
||||
static void iwl_test_trace_stop(struct iwl_test *tst)
|
||||
{
|
||||
if (!tst->trace.enabled)
|
||||
return;
|
||||
|
||||
if (tst->trace.cpu_addr && tst->trace.dma_addr)
|
||||
dma_free_coherent(tst->trans->dev,
|
||||
tst->trace.tsize,
|
||||
tst->trace.cpu_addr,
|
||||
tst->trace.dma_addr);
|
||||
|
||||
iwl_test_trace_clear(tst);
|
||||
}
|
||||
|
||||
static inline void iwl_test_mem_clear(struct iwl_test *tst)
|
||||
{
|
||||
memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
|
||||
}
|
||||
|
||||
static inline void iwl_test_mem_stop(struct iwl_test *tst)
|
||||
{
|
||||
if (!tst->mem.in_read)
|
||||
return;
|
||||
|
||||
iwl_test_mem_clear(tst);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initializes the test object
|
||||
* During the lifetime of the test object it is assumed that the transport is
|
||||
* started. The test object should be stopped before the transport is stopped.
|
||||
*/
|
||||
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
|
||||
struct iwl_test_ops *ops)
|
||||
{
|
||||
tst->trans = trans;
|
||||
tst->ops = ops;
|
||||
|
||||
iwl_test_trace_clear(tst);
|
||||
iwl_test_mem_clear(tst);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iwl_test_init);
|
||||
|
||||
/*
|
||||
* Stop the test object
|
||||
*/
|
||||
void iwl_test_free(struct iwl_test *tst)
|
||||
{
|
||||
iwl_test_mem_stop(tst);
|
||||
iwl_test_trace_stop(tst);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iwl_test_free);
|
||||
|
||||
static inline int iwl_test_send_cmd(struct iwl_test *tst,
|
||||
struct iwl_host_cmd *cmd)
|
||||
{
|
||||
return tst->ops->send_cmd(tst->trans->op_mode, cmd);
|
||||
}
|
||||
|
||||
static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
|
||||
{
|
||||
return tst->ops->valid_hw_addr(addr);
|
||||
}
|
||||
|
||||
static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
|
||||
{
|
||||
return tst->ops->get_fw_ver(tst->trans->op_mode);
|
||||
}
|
||||
|
||||
static inline struct sk_buff*
|
||||
iwl_test_alloc_reply(struct iwl_test *tst, int len)
|
||||
{
|
||||
return tst->ops->alloc_reply(tst->trans->op_mode, len);
|
||||
}
|
||||
|
||||
static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
|
||||
{
|
||||
return tst->ops->reply(tst->trans->op_mode, skb);
|
||||
}
|
||||
|
||||
static inline struct sk_buff*
|
||||
iwl_test_alloc_event(struct iwl_test *tst, int len)
|
||||
{
|
||||
return tst->ops->alloc_event(tst->trans->op_mode, len);
|
||||
}
|
||||
|
||||
static inline void
|
||||
iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
|
||||
{
|
||||
return tst->ops->event(tst->trans->op_mode, skb);
|
||||
}
|
||||
|
||||
/*
|
||||
 * This function handles the user application commands to the fw. The fw
 * commands are sent in a synchronous manner. If the user requested the
 * command's response, it is sent back to the user.
|
||||
*/
|
||||
static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
struct iwl_host_cmd cmd;
|
||||
struct iwl_rx_packet *pkt;
|
||||
struct sk_buff *skb;
|
||||
void *reply_buf;
|
||||
u32 reply_len;
|
||||
int ret;
|
||||
bool cmd_want_skb;
|
||||
|
||||
memset(&cmd, 0, sizeof(struct iwl_host_cmd));
|
||||
|
||||
if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
|
||||
!tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
|
||||
IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
|
||||
cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
|
||||
cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
|
||||
if (cmd_want_skb)
|
||||
cmd.flags |= CMD_WANT_SKB;
|
||||
|
||||
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
|
||||
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
|
||||
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
|
||||
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
|
||||
IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
|
||||
cmd.id, cmd.flags, cmd.len[0]);
|
||||
|
||||
ret = iwl_test_send_cmd(tst, &cmd);
|
||||
if (ret) {
|
||||
IWL_ERR(tst->trans, "Failed to send hcmd\n");
|
||||
return ret;
|
||||
}
|
||||
if (!cmd_want_skb)
|
||||
return ret;
|
||||
|
||||
/* Handling return of SKB to the user */
|
||||
pkt = cmd.resp_pkt;
|
||||
if (!pkt) {
|
||||
IWL_ERR(tst->trans, "HCMD received a null response packet\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
|
||||
skb = iwl_test_alloc_reply(tst, reply_len + 20);
|
||||
reply_buf = kmemdup(&pkt->hdr, reply_len, GFP_KERNEL);
|
||||
if (!skb || !reply_buf) {
|
||||
kfree_skb(skb);
|
||||
kfree(reply_buf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* The reply is in a page, that we cannot send to user space. */
|
||||
iwl_free_resp(&cmd);
|
||||
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
|
||||
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
|
||||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
|
||||
goto nla_put_failure;
|
||||
return iwl_test_reply(tst, skb);
|
||||
|
||||
nla_put_failure:
|
||||
IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
|
||||
kfree(reply_buf);
|
||||
kfree_skb(skb);
|
||||
return -ENOMSG;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handles the user application commands for register access.
|
||||
*/
|
||||
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
u32 ofs, val32, cmd;
|
||||
u8 val8;
|
||||
struct sk_buff *skb;
|
||||
int status = 0;
|
||||
struct iwl_trans *trans = tst->trans;
|
||||
|
||||
if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
|
||||
IWL_ERR(trans, "Missing reg offset\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
|
||||
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
|
||||
IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
|
||||
|
||||
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
|
||||
|
||||
/*
|
||||
* Allow access only to FH/CSR/HBUS in direct mode.
|
||||
* Since we don't have the upper bounds for the CSR and HBUS segments,
|
||||
 * we will use only the upper bound of FH as a sanity check.
|
||||
*/
|
||||
if (ofs >= FH_MEM_UPPER_BOUND) {
|
||||
IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
|
||||
FH_MEM_UPPER_BOUND);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (cmd) {
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
|
||||
val32 = iwl_read_direct32(tst->trans, ofs);
|
||||
IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
|
||||
|
||||
skb = iwl_test_alloc_reply(tst, 20);
|
||||
if (!skb) {
|
||||
IWL_ERR(trans, "Memory allocation fail\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
|
||||
goto nla_put_failure;
|
||||
status = iwl_test_reply(tst, skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(trans, "Error sending msg : %d\n", status);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
|
||||
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
|
||||
IWL_ERR(trans, "Missing value to write\n");
|
||||
return -ENOMSG;
|
||||
} else {
|
||||
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
|
||||
IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
|
||||
iwl_write_direct32(tst->trans, ofs, val32);
|
||||
}
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
|
||||
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
|
||||
IWL_ERR(trans, "Missing value to write\n");
|
||||
return -ENOMSG;
|
||||
} else {
|
||||
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
|
||||
IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
|
||||
iwl_write8(tst->trans, ofs, val8);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_ERR(trans, "Unknown test register cmd ID\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
|
||||
return status;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
 * Handles the request to start FW tracing. Allocates the trace buffer
|
||||
* and sends a reply to user space with the address of the allocated buffer.
|
||||
*/
|
||||
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int status = 0;
|
||||
|
||||
if (tst->trace.enabled)
|
||||
return -EBUSY;
|
||||
|
||||
if (!tb[IWL_TM_ATTR_TRACE_SIZE])
|
||||
tst->trace.size = TRACE_BUFF_SIZE_DEF;
|
||||
else
|
||||
tst->trace.size =
|
||||
nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
|
||||
|
||||
if (!tst->trace.size)
|
||||
return -EINVAL;
|
||||
|
||||
if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
|
||||
tst->trace.size > TRACE_BUFF_SIZE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
|
||||
tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
|
||||
tst->trace.tsize,
|
||||
&tst->trace.dma_addr,
|
||||
GFP_KERNEL);
|
||||
if (!tst->trace.cpu_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
tst->trace.enabled = true;
|
||||
tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
|
||||
|
||||
memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
|
||||
|
||||
skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
|
||||
if (!skb) {
|
||||
IWL_ERR(tst->trans, "Memory allocation fail\n");
|
||||
iwl_test_trace_stop(tst);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
|
||||
sizeof(tst->trace.dma_addr),
|
||||
(u64 *)&tst->trace.dma_addr))
|
||||
goto nla_put_failure;
|
||||
|
||||
status = iwl_test_reply(tst, skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
|
||||
|
||||
tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
|
||||
DUMP_CHUNK_SIZE);
|
||||
|
||||
return status;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
|
||||
IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
|
||||
iwl_test_trace_stop(tst);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
 * Handles indirect read from the periphery or the SRAM. The read is performed
 * into a temporary buffer. The user space application should later issue a
 * dump command to retrieve the data.
|
||||
*/
|
||||
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
|
||||
{
|
||||
struct iwl_trans *trans = tst->trans;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
if (size & 0x3)
|
||||
return -EINVAL;
|
||||
|
||||
tst->mem.size = size;
|
||||
tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
|
||||
if (tst->mem.addr == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Hard-coded periphery absolute address */
|
||||
if (IWL_ABS_PRPH_START <= addr &&
|
||||
addr < IWL_ABS_PRPH_START + PRPH_END) {
|
||||
if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
|
||||
return -EIO;
|
||||
}
|
||||
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
|
||||
addr | (3 << 24));
|
||||
for (i = 0; i < size; i += 4)
|
||||
*(u32 *)(tst->mem.addr + i) =
|
||||
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
|
||||
iwl_trans_release_nic_access(trans, &flags);
|
||||
} else { /* target memory (SRAM) */
|
||||
iwl_trans_read_mem(trans, addr, tst->mem.addr,
|
||||
tst->mem.size / 4);
|
||||
}
|
||||
|
||||
tst->mem.nchunks =
|
||||
DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
|
||||
tst->mem.in_read = true;
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
 * Handles indirect write to the periphery or SRAM. The data is written from
 * the buffer supplied by the user space application.
|
||||
*/
|
||||
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
|
||||
u32 size, unsigned char *buf)
|
||||
{
|
||||
struct iwl_trans *trans = tst->trans;
|
||||
u32 val, i;
|
||||
unsigned long flags;
|
||||
|
||||
if (IWL_ABS_PRPH_START <= addr &&
|
||||
addr < IWL_ABS_PRPH_START + PRPH_END) {
|
||||
/* Periphery writes can be 1-3 bytes long, or DWORDs */
|
||||
if (size < 4) {
|
||||
memcpy(&val, buf, size);
|
||||
if (!iwl_trans_grab_nic_access(trans, false, &flags))
|
||||
return -EIO;
|
||||
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
|
||||
(addr & 0x0000FFFF) |
|
||||
((size - 1) << 24));
|
||||
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
|
||||
iwl_trans_release_nic_access(trans, &flags);
|
||||
} else {
|
||||
if (size % 4)
|
||||
return -EINVAL;
|
||||
for (i = 0; i < size; i += 4)
|
||||
iwl_write_prph(trans, addr+i,
|
||||
*(u32 *)(buf+i));
|
||||
}
|
||||
} else if (iwl_test_valid_hw_addr(tst, addr)) {
|
||||
iwl_trans_write_mem(trans, addr, buf, size / 4);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handles the user application commands for indirect read/write
|
||||
* to/from the periphery or the SRAM.
|
||||
*/
|
||||
static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
u32 addr, size, cmd;
|
||||
unsigned char *buf;
|
||||
|
||||
/* Both read and write should be blocked, for atomicity */
|
||||
if (tst->mem.in_read)
|
||||
return -EBUSY;
|
||||
|
||||
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
|
||||
if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
|
||||
IWL_ERR(tst->trans, "Error finding memory offset address\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
|
||||
if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
|
||||
IWL_ERR(tst->trans, "Error finding size for memory reading\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
|
||||
|
||||
if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
|
||||
return iwl_test_indirect_read(tst, addr, size);
|
||||
} else {
|
||||
if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
|
||||
return -EINVAL;
|
||||
buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
|
||||
return iwl_test_indirect_write(tst, addr, size, buf);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable notifications to user space
|
||||
*/
|
||||
static int iwl_test_notifications(struct iwl_test *tst,
|
||||
struct nlattr **tb)
|
||||
{
|
||||
tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handles the request to get the device id
|
||||
*/
|
||||
static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
u32 devid = tst->trans->hw_id;
|
||||
struct sk_buff *skb;
|
||||
int status;
|
||||
|
||||
IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
|
||||
|
||||
skb = iwl_test_alloc_reply(tst, 20);
|
||||
if (!skb) {
|
||||
IWL_ERR(tst->trans, "Memory allocation fail\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
|
||||
goto nla_put_failure;
|
||||
status = iwl_test_reply(tst, skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
|
||||
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handles the request to get the FW version
|
||||
*/
|
||||
static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int status;
|
||||
u32 ver = iwl_test_fw_ver(tst);
|
||||
|
||||
IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
|
||||
|
||||
skb = iwl_test_alloc_reply(tst, 20);
|
||||
if (!skb) {
|
||||
IWL_ERR(tst->trans, "Memory allocation fail\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
|
||||
goto nla_put_failure;
|
||||
|
||||
status = iwl_test_reply(tst, skb);
|
||||
if (status < 0)
|
||||
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
|
||||
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
 * Parse the netlink message and validate that IWL_TM_ATTR_COMMAND exists
|
||||
*/
|
||||
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
|
||||
void *data, int len)
|
||||
{
|
||||
int result;
|
||||
|
||||
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
|
||||
iwl_testmode_gnl_msg_policy);
|
||||
if (result) {
|
||||
IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
|
||||
if (!tb[IWL_TM_ATTR_COMMAND]) {
|
||||
IWL_ERR(tst->trans, "Missing testmode command type\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_test_parse);
|
||||
|
||||
/*
|
||||
* Handle test commands.
|
||||
* Returns 1 for unknown commands (not handled by the test object); negative
|
||||
* value in case of error.
|
||||
*/
|
||||
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
|
||||
{
|
||||
int result;
|
||||
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_UCODE:
|
||||
IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
|
||||
result = iwl_test_fw_cmd(tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
|
||||
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
|
||||
IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
|
||||
result = iwl_test_reg(tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
|
||||
IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
|
||||
result = iwl_test_trace_begin(tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_END_TRACE:
|
||||
iwl_test_trace_stop(tst);
|
||||
result = 0;
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
|
||||
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
|
||||
IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
|
||||
result = iwl_test_indirect_mem(tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
|
||||
IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
|
||||
result = iwl_test_notifications(tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
|
||||
IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
|
||||
result = iwl_test_get_fw_ver(tst, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
|
||||
IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
|
||||
result = iwl_test_get_dev_id(tst, tb);
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
|
||||
result = 1;
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
|
||||
|
||||
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
|
||||
struct netlink_callback *cb)
|
||||
{
|
||||
int idx, length;
|
||||
|
||||
if (!tst->trace.enabled || !tst->trace.trace_addr)
|
||||
return -EFAULT;
|
||||
|
||||
idx = cb->args[4];
|
||||
if (idx >= tst->trace.nchunks)
|
||||
return -ENOENT;
|
||||
|
||||
length = DUMP_CHUNK_SIZE;
|
||||
if (((idx + 1) == tst->trace.nchunks) &&
|
||||
(tst->trace.size % DUMP_CHUNK_SIZE))
|
||||
length = tst->trace.size %
|
||||
DUMP_CHUNK_SIZE;
|
||||
|
||||
if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
|
||||
tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
|
||||
goto nla_put_failure;
|
||||
|
||||
cb->args[4] = ++idx;
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
|
||||
struct netlink_callback *cb)
|
||||
{
|
||||
int idx, length;
|
||||
|
||||
if (!tst->mem.in_read)
|
||||
return -EFAULT;
|
||||
|
||||
idx = cb->args[4];
|
||||
if (idx >= tst->mem.nchunks) {
|
||||
iwl_test_mem_stop(tst);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
length = DUMP_CHUNK_SIZE;
|
||||
if (((idx + 1) == tst->mem.nchunks) &&
|
||||
(tst->mem.size % DUMP_CHUNK_SIZE))
|
||||
length = tst->mem.size % DUMP_CHUNK_SIZE;
|
||||
|
||||
if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
|
||||
tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
|
||||
goto nla_put_failure;
|
||||
|
||||
cb->args[4] = ++idx;
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle dump commands.
|
||||
* Returns 1 for unknown commands (not handled by the test object); negative
|
||||
* value in case of error.
|
||||
*/
|
||||
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
|
||||
struct netlink_callback *cb)
|
||||
{
|
||||
int result;
|
||||
|
||||
switch (cmd) {
|
||||
case IWL_TM_CMD_APP2DEV_READ_TRACE:
|
||||
IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
|
||||
result = iwl_test_trace_dump(tst, skb, cb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
|
||||
IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
|
||||
result = iwl_test_buffer_dump(tst, skb, cb);
|
||||
break;
|
||||
|
||||
default:
|
||||
result = 1;
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_test_dump);
|
||||
|
||||
/*
|
||||
 * Multicast spontaneous messages from the device to the user space.
|
||||
*/
|
||||
static void iwl_test_send_rx(struct iwl_test *tst,
|
||||
struct iwl_rx_cmd_buffer *rxb)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct iwl_rx_packet *data;
|
||||
int length;
|
||||
|
||||
data = rxb_addr(rxb);
|
||||
length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
|
||||
|
||||
/* the length doesn't include len_n_flags field, so add it manually */
|
||||
length += sizeof(__le32);
|
||||
|
||||
skb = iwl_test_alloc_event(tst, length + 20);
|
||||
if (skb == NULL) {
|
||||
IWL_ERR(tst->trans, "Out of memory for message to user\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
|
||||
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
|
||||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
|
||||
goto nla_put_failure;
|
||||
|
||||
iwl_test_event(tst, skb);
|
||||
return;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
|
||||
}
|
||||
|
||||
/*
|
||||
 * Called whenever an Rx frame is received from the device. If notifications
 * to the user space are requested, sends the frame to the user (a user-space
 * listener sketch follows below).
|
||||
*/
|
||||
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
|
||||
{
|
||||
if (tst->notify)
|
||||
iwl_test_send_rx(tst, rxb);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_test_rx);
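A similarly hedged user-space sketch of the receiving side: once notifications are enabled with IWL_TM_CMD_APP2DEV_NOTIFICATIONS, the events multicast by cfg80211_testmode_event() can be picked up by joining the nl80211 "testmode" multicast group. libnl-genl-3 is assumed; the handler only reports the payload size rather than decoding the iwl TLVs.

/* Hypothetical listener for spontaneous testmode events multicast by
 * iwl_test_send_rx(); error handling is minimal. */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int on_event(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[NL80211_ATTR_MAX + 1];

	genlmsg_parse(nlmsg_hdr(msg), 0, attrs, NL80211_ATTR_MAX, NULL);
	if (attrs[NL80211_ATTR_TESTDATA])
		printf("testmode event, %d bytes of iwl TLVs\n",
		       nla_len(attrs[NL80211_ATTR_TESTDATA]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk))
		return 1;

	/* cfg80211_testmode_event() delivers to the nl80211 "testmode"
	 * multicast group, so join it and wait for events. */
	grp = genl_ctrl_resolve_grp(sk, "nl80211", "testmode");
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		return 1;

	/* Multicast notifications do not match request sequence numbers */
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);
	for (;;)
		nl_recvmsgs_default(sk);
}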
|
|
@@ -1,161 +0,0 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#ifndef __IWL_TEST_H__
|
||||
#define __IWL_TEST_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include "iwl-trans.h"
|
||||
|
||||
struct iwl_test_trace {
|
||||
u32 size;
|
||||
u32 tsize;
|
||||
u32 nchunks;
|
||||
u8 *cpu_addr;
|
||||
u8 *trace_addr;
|
||||
dma_addr_t dma_addr;
|
||||
bool enabled;
|
||||
};
|
||||
|
||||
struct iwl_test_mem {
|
||||
u32 size;
|
||||
u32 nchunks;
|
||||
u8 *addr;
|
||||
bool in_read;
|
||||
};
|
||||
|
||||
/*
 * struct iwl_test_ops: callbacks to the op mode
 *
 * The structure defines the callbacks that the op_mode should handle,
 * in order to handle logic that is out of the scope of iwl_test. The
 * op_mode must set all the callbacks.
 *
 * @send_cmd: handler that is used by the test object to request the
 *	op_mode to send a command to the fw.
 *
 * @valid_hw_addr: handler that is used by the test object to request the
 *	op_mode to check if the given address is a valid address.
 *
 * @get_fw_ver: handler used to get the FW version.
 *
 * @alloc_reply: handler used by the test object to request the op_mode
 *	to allocate an skb for sending a reply to the user, and initialize
 *	the skb. It is assumed that the test object only fills the required
 *	attributes.
 *
 * @reply: handler used by the test object to request the op_mode to reply
 *	to a request. The skb is an skb previously allocated by the
 *	alloc_reply callback.
 *
 * @alloc_event: handler used by the test object to request the op_mode
 *	to allocate an skb for sending an event, and initialize
 *	the skb. It is assumed that the test object only fills the required
 *	attributes.
 *
 * @event: handler used by the test object to request the op_mode to send
 *	an event. The skb is an skb previously allocated by the
 *	alloc_event callback.
 */
|
||||
struct iwl_test_ops {
|
||||
int (*send_cmd)(struct iwl_op_mode *op_modes,
|
||||
struct iwl_host_cmd *cmd);
|
||||
bool (*valid_hw_addr)(u32 addr);
|
||||
u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
|
||||
|
||||
struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
|
||||
int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
|
||||
struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
|
||||
void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
|
||||
};
|
||||
|
||||
struct iwl_test {
|
||||
struct iwl_trans *trans;
|
||||
struct iwl_test_ops *ops;
|
||||
struct iwl_test_trace trace;
|
||||
struct iwl_test_mem mem;
|
||||
bool notify;
|
||||
};
|
||||
|
||||
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
|
||||
struct iwl_test_ops *ops);
|
||||
|
||||
void iwl_test_free(struct iwl_test *tst);
|
||||
|
||||
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
|
||||
void *data, int len);
|
||||
|
||||
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
|
||||
|
||||
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
|
||||
struct netlink_callback *cb);
|
||||
|
||||
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
|
||||
|
||||
static inline void iwl_test_enable_notifications(struct iwl_test *tst,
|
||||
bool enable)
|
||||
{
|
||||
tst->notify = enable;
|
||||
}
|
||||
|
||||
#endif
|
|
@@ -1,309 +0,0 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __IWL_TESTMODE_H__
|
||||
#define __IWL_TESTMODE_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
|
||||
/*
|
||||
 * Commands from user space to kernel space (IWL_TM_CMD_ID_APP2DEV_XX) and
 * from kernel space to user space (IWL_TM_CMD_ID_DEV2APP_XX).
|
||||
* The command ID is carried with IWL_TM_ATTR_COMMAND.
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_UCODE:
|
||||
* commands from user application to the uCode,
|
||||
* the actual uCode host command ID is carried with
|
||||
* IWL_TM_ATTR_UCODE_CMD_ID
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
|
||||
* @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
|
||||
* @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
|
||||
 * commands from user application to access registers
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
|
||||
* @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image
|
||||
* @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration
|
||||
* @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image
|
||||
* @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data
|
||||
* @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set fix MCS
|
||||
 * commands from user space for pure driver-level operations
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
|
||||
* @IWL_TM_CMD_APP2DEV_END_TRACE:
|
||||
* @IWL_TM_CMD_APP2DEV_READ_TRACE:
|
||||
 * commands from user space for uCode trace operations
|
||||
*
|
||||
* @IWL_TM_CMD_DEV2APP_SYNC_RSP:
|
||||
* commands from kernel space to carry the synchronous response
|
||||
* to user application
|
||||
* @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
|
||||
* commands from kernel space to multicast the spontaneous messages
|
||||
* to user application, or reply of host commands
|
||||
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
|
||||
* commands from kernel space to carry the eeprom response
|
||||
* to user application
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_OWNERSHIP:
|
||||
 * commands from user application to change the ownership of the uCode.
 * If the application has the ownership, only host commands from
 * testmode will be delivered to the uCode. The default owner is the driver.
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake On Wireless LAN uCode image
|
||||
* @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
|
||||
* @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
|
||||
* @IWL_TM_CMD_APP2DEV_GET_FW_INFO:
|
||||
* retrieve information of existing loaded uCode image
|
||||
*
|
||||
* @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
|
||||
* @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
|
||||
* @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
|
||||
* Commands to read/write data from periphery or SRAM memory ranges.
|
||||
* For reading, a READ command is sent from userspace and the data
|
||||
* is returned when the user calls a DUMP command.
|
||||
* For writing, only a WRITE command is used.
|
||||
* @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
|
||||
* Command to enable/disable notifications (currently RX packets) from the
|
||||
* driver to userspace.
|
||||
*/
|
||||
enum iwl_tm_cmd_t {
|
||||
IWL_TM_CMD_APP2DEV_UCODE = 1,
|
||||
IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
|
||||
IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
|
||||
IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
|
||||
IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
|
||||
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
|
||||
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
|
||||
IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8,
|
||||
IWL_TM_CMD_APP2DEV_GET_EEPROM = 9,
|
||||
IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10,
|
||||
IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11,
|
||||
IWL_TM_CMD_APP2DEV_END_TRACE = 12,
|
||||
IWL_TM_CMD_APP2DEV_READ_TRACE = 13,
|
||||
IWL_TM_CMD_DEV2APP_SYNC_RSP = 14,
|
||||
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
|
||||
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
|
||||
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
|
||||
RESERVED_18 = 18,
|
||||
RESERVED_19 = 19,
|
||||
RESERVED_20 = 20,
|
||||
RESERVED_21 = 21,
|
||||
IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
|
||||
IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
|
||||
IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
|
||||
IWL_TM_CMD_APP2DEV_GET_FW_INFO = 25,
|
||||
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
|
||||
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
|
||||
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
|
||||
IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
|
||||
IWL_TM_CMD_MAX = 30,
|
||||
};
|
||||
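The command IDs above travel inside nl80211's testmode payload. Below is a minimal, hedged sketch of how a driver-side handler could dispatch on them; the function name, the NULL netlink policy and the placeholder case bodies are assumptions, only the IWL_TM_ATTR_*/IWL_TM_CMD_* names come from this header (netlink/mac80211 includes omitted for brevity):

static int example_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
	struct nlattr *tb[IWL_TM_ATTR_MAX];
	int err;

	/* Split the raw testmode blob into IWL_TM_ATTR_* attributes */
	err = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, NULL);
	if (err)
		return err;
	if (!tb[IWL_TM_ATTR_COMMAND])
		return -EINVAL;

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
		/* ... reply with an IWL_TM_ATTR_FW_VERSION attribute ... */
		return 0;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		if (!tb[IWL_TM_ATTR_REG_OFFSET])
			return -EINVAL;
		/* ... read the register named by IWL_TM_ATTR_REG_OFFSET ... */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}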
|
||||
/*
|
||||
* Attribute field in testmode commands
|
||||
* See enum iwl_tm_cmd_t.
|
||||
*
|
||||
* @IWL_TM_ATTR_NOT_APPLICABLE:
|
||||
* The attribute is not applicable or invalid
|
||||
* @IWL_TM_ATTR_COMMAND:
|
||||
* From user space to kernel space:
|
||||
* the command is destined either to the ucode, the driver, or a register;
|
||||
* From kernel space to user space:
|
||||
* the command either carries a synchronous response,
|
||||
* or the spontaneous message multicast from the device;
|
||||
*
|
||||
* @IWL_TM_ATTR_UCODE_CMD_ID:
|
||||
* @IWL_TM_ATTR_UCODE_CMD_DATA:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
|
||||
* IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
|
||||
* to the ucode
|
||||
*
|
||||
* @IWL_TM_ATTR_REG_OFFSET:
|
||||
* @IWL_TM_ATTR_REG_VALUE8:
|
||||
* @IWL_TM_ATTR_REG_VALUE32:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
|
||||
* IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value
|
||||
*
|
||||
* @IWL_TM_ATTR_SYNC_RSP:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
|
||||
* application command
|
||||
*
|
||||
* @IWL_TM_ATTR_UCODE_RX_PKT:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
|
||||
* application
|
||||
*
|
||||
* @IWL_TM_ATTR_EEPROM:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM_RSP,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_EEPROM for the data content responding to the user
|
||||
* application
|
||||
*
|
||||
* @IWL_TM_ATTR_TRACE_ADDR:
|
||||
* @IWL_TM_ATTR_TRACE_SIZE:
|
||||
* @IWL_TM_ATTR_TRACE_DUMP:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_TRACE_ADDR for the trace address
|
||||
* IWL_TM_ATTR_TRACE_SIZE for the trace buffer size
|
||||
* IWL_TM_ATTR_TRACE_DUMP for the trace dump
|
||||
*
|
||||
* @IWL_TM_ATTR_FIXRATE:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_FIXRATE for the fixed rate
|
||||
*
|
||||
* @IWL_TM_ATTR_UCODE_OWNER:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_UCODE_OWNER for the new owner
|
||||
*
|
||||
* @IWL_TM_ATTR_MEM_ADDR:
|
||||
* @IWL_TM_ATTR_BUFFER_SIZE:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
|
||||
* or IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE.
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_MEM_ADDR for the address in SRAM/periphery to read/write
|
||||
* IWL_TM_ATTR_BUFFER_SIZE for the buffer size of data to read/write.
|
||||
*
|
||||
* @IWL_TM_ATTR_BUFFER_DUMP:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP,
|
||||
* IWL_TM_ATTR_BUFFER_DUMP is used for the data that was read.
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE,
|
||||
* this attribute contains the data to write.
|
||||
*
|
||||
* @IWL_TM_ATTR_FW_VERSION:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
|
||||
* IWL_TM_ATTR_FW_VERSION for the uCode version
|
||||
*
|
||||
* @IWL_TM_ATTR_DEVICE_ID:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
|
||||
* IWL_TM_ATTR_DEVICE_ID for the device ID information
|
||||
*
|
||||
* @IWL_TM_ATTR_FW_TYPE:
|
||||
* @IWL_TM_ATTR_FW_INST_SIZE:
|
||||
* @IWL_TM_ATTR_FW_DATA_SIZE:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_INFO,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_FW_TYPE for the uCode type (INIT/RUNTIME/...)
|
||||
* IWL_TM_ATTR_FW_INST_SIZE for the size of instruction section
|
||||
* IWL_TM_ATTR_FW_DATA_SIZE for the size of data section
|
||||
*
|
||||
* @IWL_TM_ATTR_UCODE_CMD_SKB:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
|
||||
* indicates that the user wants to receive the response of the command
|
||||
* in a reply SKB. If it's not present, the response is not returned.
|
||||
* @IWL_TM_ATTR_ENABLE_NOTIFICATIONS:
|
||||
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
|
||||
* flag enables (if present) or disables (if not) the forwarding
|
||||
* to userspace.
|
||||
*/
|
||||
enum iwl_tm_attr_t {
|
||||
IWL_TM_ATTR_NOT_APPLICABLE = 0,
|
||||
IWL_TM_ATTR_COMMAND = 1,
|
||||
IWL_TM_ATTR_UCODE_CMD_ID = 2,
|
||||
IWL_TM_ATTR_UCODE_CMD_DATA = 3,
|
||||
IWL_TM_ATTR_REG_OFFSET = 4,
|
||||
IWL_TM_ATTR_REG_VALUE8 = 5,
|
||||
IWL_TM_ATTR_REG_VALUE32 = 6,
|
||||
IWL_TM_ATTR_SYNC_RSP = 7,
|
||||
IWL_TM_ATTR_UCODE_RX_PKT = 8,
|
||||
IWL_TM_ATTR_EEPROM = 9,
|
||||
IWL_TM_ATTR_TRACE_ADDR = 10,
|
||||
IWL_TM_ATTR_TRACE_SIZE = 11,
|
||||
IWL_TM_ATTR_TRACE_DUMP = 12,
|
||||
IWL_TM_ATTR_FIXRATE = 13,
|
||||
IWL_TM_ATTR_UCODE_OWNER = 14,
|
||||
IWL_TM_ATTR_MEM_ADDR = 15,
|
||||
IWL_TM_ATTR_BUFFER_SIZE = 16,
|
||||
IWL_TM_ATTR_BUFFER_DUMP = 17,
|
||||
IWL_TM_ATTR_FW_VERSION = 18,
|
||||
IWL_TM_ATTR_DEVICE_ID = 19,
|
||||
IWL_TM_ATTR_FW_TYPE = 20,
|
||||
IWL_TM_ATTR_FW_INST_SIZE = 21,
|
||||
IWL_TM_ATTR_FW_DATA_SIZE = 22,
|
||||
IWL_TM_ATTR_UCODE_CMD_SKB = 23,
|
||||
IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
|
||||
IWL_TM_ATTR_MAX = 25,
|
||||
};
|
||||
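For illustration only, a hedged userspace sketch of packing these attributes into NL80211_CMD_TESTMODE using libnl-3; the socket setup, the interface index and the register offset are placeholders, and it assumes the IWL_TM_* enum values from this header are visible to the application:

#include <errno.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* nl80211_id comes from genl_ctrl_resolve(sk, "nl80211") */
static int example_reg_read32(struct nl_sock *sk, int nl80211_id, int ifindex)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;

	if (!msg)
		return -ENOMEM;

	genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	/* Driver-specific attributes are nested inside NL80211_ATTR_TESTDATA */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND,
		    IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32);
	nla_put_u32(msg, IWL_TM_ATTR_REG_OFFSET, 0x0c /* placeholder offset */);
	nla_nest_end(msg, td);

	return nl_send_auto(sk, msg);
}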
|
||||
/* uCode trace buffer */
|
||||
#define TRACE_BUFF_SIZE_MAX 0x200000
|
||||
#define TRACE_BUFF_SIZE_MIN 0x20000
|
||||
#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
|
||||
#define TRACE_BUFF_PADD 0x2000
|
||||
|
||||
/* Maximum data size of each dump packet */
|
||||
#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
|
||||
|
||||
/* Address offset of data segment in SRAM */
|
||||
#define SRAM_DATA_SEG_OFFSET 0x800000
|
||||
|
||||
#endif
|
|
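A quick size check on the defines above (assuming 4 KiB pages): DUMP_CHUNK_SIZE works out to 4096 - 1024 = 3072 bytes, so dumping the default trace buffer of TRACE_BUFF_SIZE_DEF = 0x20000 (128 KiB) takes ceil(131072 / 3072) = 43 dump packets.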
@ -183,14 +183,12 @@ struct iwl_rx_packet {
|
|||
* @CMD_ASYNC: Return right away and don't wait for the response
|
||||
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
|
||||
* response. The caller needs to call iwl_free_resp when done.
|
||||
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
|
||||
*/
|
||||
enum CMD_MODE {
|
||||
CMD_SYNC = 0,
|
||||
CMD_ASYNC = BIT(0),
|
||||
CMD_WANT_SKB = BIT(1),
|
||||
CMD_SEND_IN_RFKILL = BIT(2),
|
||||
CMD_ON_DEMAND = BIT(3),
|
||||
};
|
||||
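A minimal sketch of the CMD_WANT_SKB contract described above; struct iwl_host_cmd, iwl_trans_send_cmd() and iwl_free_resp() are assumed to keep their usual shapes, and the command ID is left to the caller:

static int example_sync_cmd(struct iwl_trans *trans, u8 cmd_id)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* cmd.resp_pkt now points at the response buffer */
	/* ... inspect the packet ... */

	iwl_free_resp(&cmd);	/* required whenever CMD_WANT_SKB was set */
	return 0;
}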
|
||||
#define DEF_CMD_PAYLOAD_SIZE 320
|
||||
|
|
|
@ -202,6 +202,22 @@ static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
|
|||
cpu_to_le32(0x00000000),
|
||||
};
|
||||
|
||||
/* single shared antenna */
|
||||
static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x40000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0x44000000),
|
||||
cpu_to_le32(0x00000000),
|
||||
cpu_to_le32(0xC0004000),
|
||||
cpu_to_le32(0xF0005000),
|
||||
cpu_to_le32(0xC0004000),
|
||||
cpu_to_le32(0xF0005000),
|
||||
};
|
||||
|
||||
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct iwl_bt_coex_cmd cmd = {
|
||||
|
@ -225,7 +241,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
|
|||
BT_VALID_REDUCED_TX_POWER |
|
||||
BT_VALID_LUT);
|
||||
|
||||
if (is_loose_coex())
|
||||
if (mvm->cfg->bt_shared_single_ant)
|
||||
memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
|
||||
sizeof(iwl_single_shared_ant_lookup));
|
||||
else if (is_loose_coex())
|
||||
memcpy(&cmd.decision_lut, iwl_loose_lookup,
|
||||
sizeof(iwl_tight_lookup));
|
||||
else
|
||||
|
|
|
@ -1026,6 +1026,12 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
if (mvm->d3_wake_sysassert)
|
||||
d3_cfg_cmd_data.wakeup_flags |=
|
||||
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
|
||||
#endif
|
||||
|
||||
/* must be last -- this switches firmware state */
|
||||
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
|
||||
if (ret)
|
||||
|
|
|
@ -344,6 +344,13 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
|
|||
case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
|
||||
IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
|
||||
dbgfs_pm->disable_power_off = val;
break;
|
||||
case MVM_DEBUGFS_PM_LPRX_ENA:
|
||||
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
|
||||
dbgfs_pm->lprx_ena = val;
|
||||
break;
|
||||
case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
|
||||
IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
|
||||
dbgfs_pm->lprx_rssi_threshold = val;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -387,6 +394,17 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
|
|||
if (sscanf(buf + 18, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
|
||||
} else if (!strncmp("lprx=", buf, 5)) {
|
||||
if (sscanf(buf + 5, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_PM_LPRX_ENA;
|
||||
} else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
|
||||
if (sscanf(buf + 20, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
|
||||
POWER_LPRX_RSSI_THRESHOLD_MIN)
|
||||
return -EINVAL;
|
||||
param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -421,7 +439,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
|
|||
le32_to_cpu(cmd.skip_dtim_periods));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
|
||||
iwlmvm_mod_params.power_scheme);
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "flags = %d\n",
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
|
||||
le16_to_cpu(cmd.flags));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
|
||||
cmd.keep_alive_seconds);
|
||||
|
@ -435,6 +453,10 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
|
|||
le32_to_cpu(cmd.rx_data_timeout));
|
||||
pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
|
||||
le32_to_cpu(cmd.tx_data_timeout));
|
||||
if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
|
||||
pos += scnprintf(buf+pos, bufsz-pos,
|
||||
"lprx_rssi_threshold = %d\n",
|
||||
le32_to_cpu(cmd.lprx_rssi_threshold));
|
||||
}
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
|
||||
|
@ -939,6 +961,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
|
|||
#ifdef CONFIG_PM_SLEEP
|
||||
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
|
||||
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
|
||||
if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
|
||||
mvm->debugfs_dir, &mvm->d3_wake_sysassert))
|
||||
goto err;
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -66,6 +66,11 @@
|
|||
|
||||
/* Power Management Commands, Responses, Notifications */
|
||||
|
||||
/* Radio LP RX Energy Threshold measured in dBm */
|
||||
#define POWER_LPRX_RSSI_THRESHOLD 75
|
||||
#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
|
||||
#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
|
||||
|
||||
/**
|
||||
* enum iwl_scan_flags - masks for power table command flags
|
||||
* @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
|
||||
|
|
|
@ -865,6 +865,30 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
|
|||
return ret;
|
||||
}
|
||||
|
||||
struct iwl_mvm_mac_ap_iterator_data {
|
||||
struct iwl_mvm *mvm;
|
||||
struct ieee80211_vif *vif;
|
||||
u32 beacon_device_ts;
|
||||
u16 beacon_int;
|
||||
};
|
||||
|
||||
/* Find the beacon_device_ts and beacon_int for a managed interface */
|
||||
static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_mac_ap_iterator_data *data = _data;
|
||||
|
||||
if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
|
||||
return;
|
||||
|
||||
/* Station client has higher priority over P2P client */
|
||||
if (vif->p2p && data->beacon_device_ts)
|
||||
return;
|
||||
|
||||
data->beacon_device_ts = vif->bss_conf.sync_device_ts;
|
||||
data->beacon_int = vif->bss_conf.beacon_int;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill the specific data for mac context of type AP of P2P GO
|
||||
*/
|
||||
|
@ -874,6 +898,11 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
|
|||
bool add)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm_mac_ap_iterator_data data = {
|
||||
.mvm = mvm,
|
||||
.vif = vif,
|
||||
.beacon_device_ts = 0
|
||||
};
|
||||
|
||||
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
|
||||
ctxt_ap->bi_reciprocal =
|
||||
|
@ -887,16 +916,33 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
|
|||
ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
|
||||
|
||||
/*
|
||||
* Only read the system time when the MAC is being added, when we
|
||||
* Only set the beacon time when the MAC is being added, when we
|
||||
* just modify the MAC then we should keep the time -- the firmware
|
||||
* can otherwise have a "jumping" TBTT.
|
||||
*/
|
||||
if (add)
|
||||
mvmvif->ap_beacon_time =
|
||||
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
|
||||
if (add) {
|
||||
/*
|
||||
* If there is a station/P2P client interface which is
|
||||
* associated, set the AP's TBTT far enough from the station's
|
||||
* TBTT. Otherwise, set it to the current system time
|
||||
*/
|
||||
ieee80211_iterate_active_interfaces_atomic(
|
||||
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
iwl_mvm_mac_ap_iterator, &data);
|
||||
|
||||
if (data.beacon_device_ts) {
|
||||
u32 rand = (prandom_u32() % (80 - 20)) + 20;
|
||||
mvmvif->ap_beacon_time = data.beacon_device_ts +
|
||||
ieee80211_tu_to_usec(data.beacon_int * rand /
|
||||
100);
|
||||
} else {
|
||||
mvmvif->ap_beacon_time =
|
||||
iwl_read_prph(mvm->trans,
|
||||
DEVICE_SYSTEM_TIME_REG);
|
||||
}
|
||||
}
|
||||
|
||||
ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
|
||||
|
||||
ctxt_ap->beacon_tsf = 0; /* unused */
|
||||
|
||||
/* TODO: Assume that the beacon id == mac context id */
|
||||
|
|
|
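To make the randomized offset above concrete: with a typical beacon_int of 100 TU and a draw of rand = 50, the new AP/GO TBTT lands 100 * 50 / 100 = 50 TU after the client's synchronized beacon timestamp, i.e. ieee80211_tu_to_usec(50) = 51200 usec, and in general somewhere between 20% and 80% of the client's beacon interval.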
@ -73,7 +73,6 @@
|
|||
#include "iwl-trans.h"
|
||||
#include "iwl-notif-wait.h"
|
||||
#include "iwl-eeprom-parse.h"
|
||||
#include "iwl-test.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "sta.h"
|
||||
#include "fw-api.h"
|
||||
|
@ -159,6 +158,8 @@ enum iwl_dbgfs_pm_mask {
|
|||
MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
|
||||
MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
|
||||
MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
|
||||
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
|
||||
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
|
||||
};
|
||||
|
||||
struct iwl_dbgfs_pm {
|
||||
|
@ -168,6 +169,8 @@ struct iwl_dbgfs_pm {
|
|||
bool skip_over_dtim;
|
||||
u8 skip_dtim_periods;
|
||||
bool disable_power_off;
|
||||
bool lprx_ena;
|
||||
u32 lprx_rssi_threshold;
|
||||
int mask;
|
||||
};
|
||||
|
||||
|
@ -353,12 +356,14 @@ struct iwl_tt_params {
|
|||
* @dynamic_smps: Is dynamic SMPS thermal throttling currently enabled?
|
||||
* @tx_backoff: The current thermal throttling tx backoff in uSec.
|
||||
* @params: Parameters to configure the thermal throttling algorithm.
|
||||
* @throttle: Is thermal throttling active?
|
||||
*/
|
||||
struct iwl_mvm_tt_mgmt {
|
||||
struct delayed_work ct_kill_exit;
|
||||
bool dynamic_smps;
|
||||
u32 tx_backoff;
|
||||
const struct iwl_tt_params *params;
|
||||
bool throttle;
|
||||
};
|
||||
|
||||
struct iwl_mvm {
|
||||
|
@ -461,6 +466,7 @@ struct iwl_mvm {
|
|||
struct wiphy_wowlan_support wowlan;
|
||||
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
|
||||
bool d3_test_active;
|
||||
bool store_d3_resume_sram;
|
||||
void *d3_resume_sram;
|
||||
|
|
|
@ -137,11 +137,12 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
|
|||
le32_to_cpu(cmd->rx_data_timeout));
|
||||
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
|
||||
le32_to_cpu(cmd->tx_data_timeout));
|
||||
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
|
||||
cmd->lprx_rssi_threshold);
|
||||
if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
|
||||
IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
|
||||
le32_to_cpu(cmd->skip_dtim_periods));
|
||||
if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
|
||||
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
|
||||
le32_to_cpu(cmd->lprx_rssi_threshold));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -181,6 +182,14 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
|
||||
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
|
||||
|
||||
if (vif->bss_conf.beacon_rate &&
|
||||
(vif->bss_conf.beacon_rate->bitrate == 10 ||
|
||||
vif->bss_conf.beacon_rate->bitrate == 60)) {
|
||||
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
|
||||
cmd->lprx_rssi_threshold =
|
||||
cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
|
||||
}
|
||||
|
||||
dtimper = hw->conf.ps_dtim_period ?: 1;
|
||||
|
||||
/* Check if radar detection is required on current channel */
|
||||
|
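For reference, ieee80211_rate.bitrate is expressed in units of 100 kbit/s, so the two values checked above (10 and 60) correspond to 1 Mbit/s CCK and 6 Mbit/s OFDM beacons, the lowest mandatory rates on the 2.4 GHz and 5 GHz bands respectively.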
@ -236,6 +245,15 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
|
||||
cmd->skip_dtim_periods =
|
||||
cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
|
||||
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
|
||||
if (mvmvif->dbgfs_pm.lprx_ena)
|
||||
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
|
||||
else
|
||||
cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
|
||||
}
|
||||
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
|
||||
cmd->lprx_rssi_threshold =
|
||||
cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
|
||||
#endif /* CONFIG_IWLWIFI_DEBUGFS */
|
||||
}
|
||||
|
||||
|
|
|
@ -412,24 +412,18 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
|
|||
return ret;
|
||||
}
|
||||
|
||||
if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
|
||||
IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
|
||||
sta->addr, tid);
|
||||
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
|
||||
if (ret == -EAGAIN) {
|
||||
/*
|
||||
* driver and mac80211 are out of sync
|
||||
* this might be caused by reloading the firmware
|
||||
* stop the tx ba session here
|
||||
*/
|
||||
IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
|
||||
tid);
|
||||
ieee80211_stop_tx_ba_session(sta, tid);
|
||||
}
|
||||
} else {
|
||||
IWL_DEBUG_HT(mvm,
|
||||
"Aggregation not enabled for tid %d because load = %u\n",
|
||||
tid, load);
|
||||
IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
|
||||
sta->addr, tid);
|
||||
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
|
||||
if (ret == -EAGAIN) {
|
||||
/*
|
||||
* driver and mac80211 are out of sync
|
||||
* this might be caused by reloading the firmware
|
||||
* stop the tx ba session here
|
||||
*/
|
||||
IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
|
||||
tid);
|
||||
ieee80211_stop_tx_ba_session(sta, tid);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -427,6 +427,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
|||
const struct iwl_tt_params *params = mvm->thermal_throttle.params;
|
||||
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
|
||||
s32 temperature = mvm->temperature;
|
||||
bool throttle_enable = false;
|
||||
int i;
|
||||
u32 tx_backoff;
|
||||
|
||||
|
@ -445,6 +446,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
|||
ieee80211_iterate_active_interfaces_atomic(
|
||||
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
|
||||
iwl_mvm_tt_smps_iterator, mvm);
|
||||
throttle_enable = true;
|
||||
} else if (tt->dynamic_smps &&
|
||||
temperature <= params->dynamic_smps_exit) {
|
||||
IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
|
||||
|
@ -456,10 +458,12 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
|||
}
|
||||
|
||||
if (params->support_tx_protection) {
|
||||
if (temperature >= params->tx_protection_entry)
|
||||
if (temperature >= params->tx_protection_entry) {
|
||||
iwl_mvm_tt_tx_protection(mvm, true);
|
||||
else if (temperature <= params->tx_protection_exit)
|
||||
throttle_enable = true;
|
||||
} else if (temperature <= params->tx_protection_exit) {
|
||||
iwl_mvm_tt_tx_protection(mvm, false);
|
||||
}
|
||||
}
|
||||
|
||||
if (params->support_tx_backoff) {
|
||||
|
@ -469,9 +473,22 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
|
|||
break;
|
||||
tx_backoff = params->tx_backoff[i].backoff;
|
||||
}
|
||||
if (tx_backoff != 0)
|
||||
throttle_enable = true;
|
||||
if (tt->tx_backoff != tx_backoff)
|
||||
iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
|
||||
}
|
||||
|
||||
if (!tt->throttle && throttle_enable) {
|
||||
IWL_WARN(mvm,
|
||||
"Due to high temperature thermal throttling initiated\n");
|
||||
tt->throttle = true;
|
||||
} else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
|
||||
temperature <= params->tx_protection_exit) {
|
||||
IWL_WARN(mvm,
|
||||
"Temperature is back to normal thermal throttling stopped\n");
|
||||
tt->throttle = false;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct iwl_tt_params iwl7000_tt_params = {
|
||||
|
@ -502,6 +519,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
|
|||
|
||||
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
|
||||
tt->params = &iwl7000_tt_params;
|
||||
tt->throttle = false;
|
||||
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
|
||||
}
|
||||
|
||||
|
|
|
@ -110,9 +110,10 @@
|
|||
/*
|
||||
* iwl_rxq_space - Return number of free slots available in queue.
|
||||
*/
|
||||
static int iwl_rxq_space(const struct iwl_rxq *q)
|
||||
static int iwl_rxq_space(const struct iwl_rxq *rxq)
|
||||
{
|
||||
int s = q->read - q->write;
|
||||
int s = rxq->read - rxq->write;
|
||||
|
||||
if (s <= 0)
|
||||
s += RX_QUEUE_SIZE;
|
||||
/* keep some buffer to not confuse full and empty queue */
|
||||
|
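Worked example for the wraparound above (RX_QUEUE_SIZE is 256 on this transport): with read = 10 and write = 250, s = 10 - 250 = -240, which wraps to -240 + 256 = 16 free slots before the guard slack mentioned in the comment is subtracted.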
@ -143,21 +144,22 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
|
|||
/*
|
||||
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
|
||||
*/
|
||||
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
|
||||
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
|
||||
struct iwl_rxq *rxq)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 reg;
|
||||
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
if (q->need_update == 0)
|
||||
if (rxq->need_update == 0)
|
||||
goto exit_unlock;
|
||||
|
||||
if (trans->cfg->base_params->shadow_reg_enable) {
|
||||
/* shadow register enabled */
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
|
||||
rxq->write_actual = (rxq->write & ~0x7);
|
||||
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
|
||||
} else {
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
@ -175,22 +177,22 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
|
|||
goto exit_unlock;
|
||||
}
|
||||
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
rxq->write_actual = (rxq->write & ~0x7);
|
||||
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
rxq->write_actual);
|
||||
|
||||
/* Else device is assumed to be awake */
|
||||
} else {
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
rxq->write_actual = (rxq->write & ~0x7);
|
||||
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
rxq->write_actual);
|
||||
}
|
||||
}
|
||||
q->need_update = 0;
|
||||
rxq->need_update = 0;
|
||||
|
||||
exit_unlock:
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -355,19 +357,16 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
|
|||
struct iwl_rxq *rxq = &trans_pcie->rxq;
|
||||
int i;
|
||||
|
||||
/* Fill the rx_used queue with _all_ of the Rx buffers */
|
||||
lockdep_assert_held(&rxq->lock);
|
||||
|
||||
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
|
||||
/* In the reset function, these buffers may have been allocated
|
||||
* to an SKB, so we need to unmap and free potential storage */
|
||||
if (rxq->pool[i].page != NULL) {
|
||||
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
|
||||
PAGE_SIZE << trans_pcie->rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
__free_pages(rxq->pool[i].page,
|
||||
trans_pcie->rx_page_order);
|
||||
rxq->pool[i].page = NULL;
|
||||
}
|
||||
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
|
||||
if (!rxq->pool[i].page)
|
||||
continue;
|
||||
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
|
||||
PAGE_SIZE << trans_pcie->rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
|
||||
rxq->pool[i].page = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -491,6 +490,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
|
|||
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
|
||||
}
|
||||
|
||||
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
|
||||
{
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&rxq->lock);
|
||||
|
||||
INIT_LIST_HEAD(&rxq->rx_free);
|
||||
INIT_LIST_HEAD(&rxq->rx_used);
|
||||
rxq->free_count = 0;
|
||||
|
||||
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
|
||||
list_add(&rxq->pool[i].list, &rxq->rx_used);
|
||||
}
|
||||
|
||||
int iwl_pcie_rx_init(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
@ -505,13 +518,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
|
|||
}
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
INIT_LIST_HEAD(&rxq->rx_free);
|
||||
INIT_LIST_HEAD(&rxq->rx_used);
|
||||
|
||||
INIT_WORK(&trans_pcie->rx_replenish,
|
||||
iwl_pcie_rx_replenish_work);
|
||||
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
|
||||
|
||||
/* free all first - we might be reconfigured for a different size */
|
||||
iwl_pcie_rxq_free_rbs(trans);
|
||||
iwl_pcie_rx_init_rxb_lists(rxq);
|
||||
|
||||
for (i = 0; i < RX_QUEUE_SIZE; i++)
|
||||
rxq->queue[i] = NULL;
|
||||
|
@ -520,7 +532,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
|
|||
* not restocked the Rx queue with fresh buffers */
|
||||
rxq->read = rxq->write = 0;
|
||||
rxq->write_actual = 0;
|
||||
rxq->free_count = 0;
|
||||
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
|
|
|
@ -838,8 +838,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
|
|||
unsigned long *flags)
|
||||
{
|
||||
int ret;
|
||||
struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
spin_lock_irqsave(&pcie_trans->reg_lock, *flags);
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
|
||||
|
||||
/* this bit wakes up the NIC */
|
||||
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
|
||||
|
@ -875,7 +876,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
|
|||
WARN_ONCE(1,
|
||||
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
|
||||
val);
|
||||
spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
|
||||
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -884,22 +885,22 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
|
|||
* Fool sparse by faking we release the lock - sparse will
|
||||
* track nic_access anyway.
|
||||
*/
|
||||
__release(&pcie_trans->reg_lock);
|
||||
__release(&trans_pcie->reg_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
|
||||
unsigned long *flags)
|
||||
{
|
||||
struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
lockdep_assert_held(&pcie_trans->reg_lock);
|
||||
lockdep_assert_held(&trans_pcie->reg_lock);
|
||||
|
||||
/*
|
||||
* Fool sparse by faking we are acquiring the lock - sparse will
|
||||
* track nic_access anyway.
|
||||
*/
|
||||
__acquire(&pcie_trans->reg_lock);
|
||||
__acquire(&trans_pcie->reg_lock);
|
||||
|
||||
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
|
@ -910,7 +911,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
|
|||
* scheduled on different CPUs (after we drop reg_lock).
|
||||
*/
|
||||
mmiowb();
|
||||
spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
|
||||
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
|
||||
}
|
||||
|
||||
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
|
||||
|
|
|
@ -100,7 +100,7 @@
|
|||
#define CSR_REG_BASE 0x1000
|
||||
#define CSR_REG_SIZE 0x0800
|
||||
#define EEPROM_BASE 0x0000
|
||||
#define EEPROM_SIZE 0x0110
|
||||
#define EEPROM_SIZE 0x0200
|
||||
#define BBP_BASE 0x0000
|
||||
#define BBP_SIZE 0x00ff
|
||||
#define RF_BASE 0x0004
|
||||
|
@ -2625,11 +2625,13 @@ struct mac_iveiv_entry {
|
|||
/*
|
||||
* DMA descriptor defines.
|
||||
*/
|
||||
#define TXWI_DESC_SIZE (4 * sizeof(__le32))
|
||||
#define RXWI_DESC_SIZE (4 * sizeof(__le32))
|
||||
|
||||
#define TXWI_DESC_SIZE_5592 (5 * sizeof(__le32))
|
||||
#define RXWI_DESC_SIZE_5592 (6 * sizeof(__le32))
|
||||
#define TXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
|
||||
#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
|
||||
|
||||
#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
|
||||
#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32))
|
||||
|
||||
/*
|
||||
* TX WI structure
|
||||
*/
|
||||
|
|
|
@ -2392,7 +2392,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
|
|||
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
|
||||
|
||||
rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
|
||||
if (info->default_power1 > power_bound)
|
||||
if (info->default_power2 > power_bound)
|
||||
rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
|
||||
else
|
||||
rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
|
||||
|
@ -2678,30 +2678,53 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
|
|||
|
||||
tx_pin = 0;
|
||||
|
||||
/* Turn on unused PA or LNA when not using 1T or 1R */
|
||||
if (rt2x00dev->default_ant.tx_chain_num == 2) {
|
||||
switch (rt2x00dev->default_ant.tx_chain_num) {
|
||||
case 3:
|
||||
/* Turn on tertiary PAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN,
|
||||
rf->channel > 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN,
|
||||
rf->channel <= 14);
|
||||
/* fall-through */
|
||||
case 2:
|
||||
/* Turn on secondary PAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
|
||||
rf->channel > 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
|
||||
rf->channel <= 14);
|
||||
/* fall-through */
|
||||
case 1:
|
||||
/* Turn on primary PAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
|
||||
rf->channel > 14);
|
||||
if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
|
||||
else
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
|
||||
rf->channel <= 14);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Turn on unused PA or LNA when not using 1T or 1R */
|
||||
if (rt2x00dev->default_ant.rx_chain_num == 2) {
|
||||
switch (rt2x00dev->default_ant.rx_chain_num) {
|
||||
case 3:
|
||||
/* Turn on tertiary LNAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
|
||||
/* fall-through */
|
||||
case 2:
|
||||
/* Turn on secondary LNAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
|
||||
/* fall-through */
|
||||
case 1:
|
||||
/* Turn on primary LNAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
|
||||
break;
|
||||
}
|
||||
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
|
||||
if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
|
||||
else
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
|
||||
rf->channel <= 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
|
||||
|
||||
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
|
||||
|
||||
|
@ -6254,8 +6277,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
|
|||
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
|
||||
|
||||
for (i = 14; i < spec->num_channels; i++) {
|
||||
info[i].default_power1 = default_power1[i];
|
||||
info[i].default_power2 = default_power2[i];
|
||||
info[i].default_power1 = default_power1[i - 14];
|
||||
info[i].default_power2 = default_power2[i - 14];
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -637,6 +637,7 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
|
|||
struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
|
||||
__le32 *txd = entry_priv->desc;
|
||||
u32 word;
|
||||
const unsigned int txwi_size = entry->queue->winfo_size;
|
||||
|
||||
/*
|
||||
* The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
|
||||
|
@ -659,14 +660,14 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
|
|||
!test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
|
||||
rt2x00_set_field32(&word, TXD_W1_BURST,
|
||||
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
|
||||
rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE);
|
||||
rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
|
||||
rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
|
||||
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
|
||||
rt2x00_desc_write(txd, 1, word);
|
||||
|
||||
word = 0;
|
||||
rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
|
||||
skbdesc->skb_dma + TXWI_DESC_SIZE);
|
||||
skbdesc->skb_dma + txwi_size);
|
||||
rt2x00_desc_write(txd, 2, word);
|
||||
|
||||
word = 0;
|
||||
|
@ -1193,7 +1194,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
|
|||
queue->limit = 128;
|
||||
queue->data_size = AGGREGATION_SIZE;
|
||||
queue->desc_size = RXD_DESC_SIZE;
|
||||
queue->winfo_size = RXWI_DESC_SIZE;
|
||||
queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
|
||||
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
|
||||
break;
|
||||
|
||||
|
@ -1204,7 +1205,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
|
|||
queue->limit = 64;
|
||||
queue->data_size = AGGREGATION_SIZE;
|
||||
queue->desc_size = TXD_DESC_SIZE;
|
||||
queue->winfo_size = TXWI_DESC_SIZE;
|
||||
queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
|
||||
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
|
||||
break;
|
||||
|
||||
|
@ -1212,7 +1213,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
|
|||
queue->limit = 8;
|
||||
queue->data_size = 0; /* No DMA required for beacons */
|
||||
queue->desc_size = TXD_DESC_SIZE;
|
||||
queue->winfo_size = TXWI_DESC_SIZE;
|
||||
queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
|
||||
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
|
||||
break;
|
||||
|
||||
|
|
|
@ -855,11 +855,11 @@ static void rt2800usb_queue_init(struct data_queue *queue)
|
|||
unsigned short txwi_size, rxwi_size;
|
||||
|
||||
if (rt2x00_rt(rt2x00dev, RT5592)) {
|
||||
txwi_size = TXWI_DESC_SIZE_5592;
|
||||
rxwi_size = RXWI_DESC_SIZE_5592;
|
||||
txwi_size = TXWI_DESC_SIZE_5WORDS;
|
||||
rxwi_size = RXWI_DESC_SIZE_6WORDS;
|
||||
} else {
|
||||
txwi_size = TXWI_DESC_SIZE;
|
||||
rxwi_size = RXWI_DESC_SIZE;
|
||||
txwi_size = TXWI_DESC_SIZE_4WORDS;
|
||||
rxwi_size = RXWI_DESC_SIZE_4WORDS;
|
||||
}
|
||||
|
||||
switch (queue->qid) {
|
||||
|
|
|
@ -2825,7 +2825,8 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
|
|||
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
|
||||
for (i = 14; i < spec->num_channels; i++) {
|
||||
info[i].max_power = MAX_TXPOWER;
|
||||
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
|
||||
info[i].default_power1 =
|
||||
TXPOWER_FROM_DEV(tx_power[i - 14]);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2167,7 +2167,8 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
|
|||
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
|
||||
for (i = 14; i < spec->num_channels; i++) {
|
||||
info[i].max_power = MAX_TXPOWER;
|
||||
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
|
||||
info[i].default_power1 =
|
||||
TXPOWER_FROM_DEV(tx_power[i - 14]);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -251,7 +251,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
|
|||
.bar_id = 2,
|
||||
.write_readback = true,
|
||||
.name = "rtl8723ae_pci",
|
||||
.fw_name = "rtlwifi/rtl8723aefw.bin",
|
||||
.fw_name = "rtlwifi/rtl8723fw.bin",
|
||||
.ops = &rtl8723ae_hal_ops,
|
||||
.mod_params = &rtl8723ae_mod_params,
|
||||
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
|
||||
|
@ -353,8 +353,8 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
|
|||
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
|
||||
MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin");
|
||||
MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin");
|
||||
MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin");
|
||||
MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin");
|
||||
|
||||
module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
|
||||
module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
|
||||
|
|
|
@ -144,6 +144,7 @@ struct bcma_host_ops {
|
|||
|
||||
/* Chip IDs of PCIe devices */
|
||||
#define BCMA_CHIP_ID_BCM4313 0x4313
|
||||
#define BCMA_CHIP_ID_BCM43142 43142
|
||||
#define BCMA_CHIP_ID_BCM43224 43224
|
||||
#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8
|
||||
#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa
|
||||
|
|
|
@ -330,6 +330,8 @@
|
|||
#define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */
|
||||
#define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */
|
||||
#define BCMA_CC_PMU_STAT 0x0608 /* PMU status */
|
||||
#define BCMA_CC_PMU_STAT_EXT_LPO_AVAIL 0x00000100
|
||||
#define BCMA_CC_PMU_STAT_WDRESET 0x00000080
|
||||
#define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */
|
||||
#define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */
|
||||
#define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */
|
||||
|
@ -355,6 +357,11 @@
|
|||
#define BCMA_CC_REGCTL_DATA 0x065C
|
||||
#define BCMA_CC_PLLCTL_ADDR 0x0660
|
||||
#define BCMA_CC_PLLCTL_DATA 0x0664
|
||||
#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */
|
||||
#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */
|
||||
#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF
|
||||
#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_MASK 0x80000000
|
||||
#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT 31
|
||||
#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
|
||||
/* NAND flash MLC controller registers (corerev >= 38) */
|
||||
#define BCMA_CC_NAND_REVISION 0x0C00
|
||||
|
@ -435,6 +442,23 @@
|
|||
#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
|
||||
#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0
|
||||
|
||||
/* PMU rev 15 */
|
||||
#define BCMA_CC_PMU15_PLL_PLLCTL0 0
|
||||
#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_MASK 0x00000003
|
||||
#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_SHIFT 0
|
||||
#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC
|
||||
#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT 2
|
||||
#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000
|
||||
#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_SHIFT 22
|
||||
#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_MASK 0x07000000
|
||||
#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_SHIFT 24
|
||||
#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000
|
||||
#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_SHIFT 27
|
||||
#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_MASK 0x40000000
|
||||
#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_SHIFT 30
|
||||
#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000
|
||||
#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_SHIFT 31
|
||||
|
||||
/* ALP clock on pre-PMU chips */
|
||||
#define BCMA_CC_PMU_ALP_CLOCK 20000000
|
||||
/* HT clock for systems with PMU-enabled chipcommon */
|
||||
|
@ -507,6 +531,37 @@
|
|||
#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE BIT(18)
|
||||
#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE BIT(19)
|
||||
|
||||
#define BCMA_RES_4314_LPLDO_PU BIT(0)
|
||||
#define BCMA_RES_4314_PMU_SLEEP_DIS BIT(1)
|
||||
#define BCMA_RES_4314_PMU_BG_PU BIT(2)
|
||||
#define BCMA_RES_4314_CBUCK_LPOM_PU BIT(3)
|
||||
#define BCMA_RES_4314_CBUCK_PFM_PU BIT(4)
|
||||
#define BCMA_RES_4314_CLDO_PU BIT(5)
|
||||
#define BCMA_RES_4314_LPLDO2_LVM BIT(6)
|
||||
#define BCMA_RES_4314_WL_PMU_PU BIT(7)
|
||||
#define BCMA_RES_4314_LNLDO_PU BIT(8)
|
||||
#define BCMA_RES_4314_LDO3P3_PU BIT(9)
|
||||
#define BCMA_RES_4314_OTP_PU BIT(10)
|
||||
#define BCMA_RES_4314_XTAL_PU BIT(11)
|
||||
#define BCMA_RES_4314_WL_PWRSW_PU BIT(12)
|
||||
#define BCMA_RES_4314_LQ_AVAIL BIT(13)
|
||||
#define BCMA_RES_4314_LOGIC_RET BIT(14)
|
||||
#define BCMA_RES_4314_MEM_SLEEP BIT(15)
|
||||
#define BCMA_RES_4314_MACPHY_RET BIT(16)
|
||||
#define BCMA_RES_4314_WL_CORE_READY BIT(17)
|
||||
#define BCMA_RES_4314_ILP_REQ BIT(18)
|
||||
#define BCMA_RES_4314_ALP_AVAIL BIT(19)
|
||||
#define BCMA_RES_4314_MISC_PWRSW_PU BIT(20)
|
||||
#define BCMA_RES_4314_SYNTH_PWRSW_PU BIT(21)
|
||||
#define BCMA_RES_4314_RX_PWRSW_PU BIT(22)
|
||||
#define BCMA_RES_4314_RADIO_PU BIT(23)
|
||||
#define BCMA_RES_4314_VCO_LDO_PU BIT(24)
|
||||
#define BCMA_RES_4314_AFE_LDO_PU BIT(25)
|
||||
#define BCMA_RES_4314_RX_LDO_PU BIT(26)
|
||||
#define BCMA_RES_4314_TX_LDO_PU BIT(27)
|
||||
#define BCMA_RES_4314_HT_AVAIL BIT(28)
|
||||
#define BCMA_RES_4314_MACPHY_CLK_AVAIL BIT(29)
|
||||
|
||||
/* Data for the PMU, if available.
|
||||
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
|
||||
*/
|
||||
|
|
|
@ -90,6 +90,10 @@ void __init brcmfmac_init_pdata(void)
|
|||
* oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are
|
||||
* used for registering the irq using request_irq function.
|
||||
*
|
||||
* broken_sg_support: flag for broken sg list support of SDIO host controller.
|
||||
* Set this to true if the SDIO host controller has higher align requirement
|
||||
* than 32 bytes for each scatterlist item.
|
||||
*
|
||||
* power_on: This function is called by the brcmfmac when the module gets
|
||||
* loaded. This can be particularly useful for low power devices. The platform
|
||||
* specific routine may, for example, decide to power up the complete device.
|
||||
|
@ -116,6 +120,7 @@ struct brcmfmac_sdio_platform_data {
|
|||
bool oob_irq_supported;
|
||||
unsigned int oob_irq_nr;
|
||||
unsigned long oob_irq_flags;
|
||||
bool broken_sg_support;
|
||||
void (*power_on)(void);
|
||||
void (*power_off)(void);
|
||||
void (*reset)(void);
|
||||
|
|
|
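A hedged board-file sketch of how a platform might populate this structure; the IRQ number, the header path and the two power helpers are made up, only the field names come from the structure above:

#include <linux/interrupt.h>
#include <linux/platform_data/brcmfmac-sdio.h>	/* assumed header location */

static void example_wifi_power_on(void)
{
	/* e.g. drive the WL_REG_ON GPIO high - board specific */
}

static void example_wifi_power_off(void)
{
	/* e.g. drive the WL_REG_ON GPIO low - board specific */
}

static struct brcmfmac_sdio_platform_data example_brcmf_sdio_pdata = {
	.oob_irq_supported	= true,
	.oob_irq_nr		= 42,			/* placeholder IRQ */
	.oob_irq_flags		= IRQF_TRIGGER_HIGH,
	.broken_sg_support	= true,	/* host needs >32 byte alignment */
	.power_on		= example_wifi_power_on,
	.power_off		= example_wifi_power_off,
};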
@ -107,7 +107,6 @@ enum {
|
|||
HCI_MGMT,
|
||||
HCI_PAIRABLE,
|
||||
HCI_SERVICE_CACHE,
|
||||
HCI_LINK_KEYS,
|
||||
HCI_DEBUG_KEYS,
|
||||
HCI_UNREGISTER,
|
||||
|
||||
|
|
|
@ -117,13 +117,6 @@ struct oob_data {
|
|||
u8 randomizer[16];
|
||||
};
|
||||
|
||||
struct le_scan_params {
|
||||
u8 type;
|
||||
u16 interval;
|
||||
u16 window;
|
||||
int timeout;
|
||||
};
|
||||
|
||||
#define HCI_MAX_SHORT_NAME_LENGTH 10
|
||||
|
||||
struct amp_assoc {
|
||||
|
@ -283,9 +276,6 @@ struct hci_dev {
|
|||
|
||||
struct delayed_work le_scan_disable;
|
||||
|
||||
struct work_struct le_scan;
|
||||
struct le_scan_params le_scan_params;
|
||||
|
||||
__s8 adv_tx_power;
|
||||
__u8 adv_data[HCI_MAX_AD_LENGTH];
|
||||
__u8 adv_data_len;
|
||||
|
@ -432,6 +422,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
|
|||
struct inquiry_entry *ie);
|
||||
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
|
||||
bool name_known, bool *ssp);
|
||||
void hci_inquiry_cache_flush(struct hci_dev *hdev);
|
||||
|
||||
/* ----- HCI Connections ----- */
|
||||
enum {
|
||||
|
@ -1114,6 +1105,16 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
|
|||
BIT(BDADDR_LE_PUBLIC) | \
|
||||
BIT(BDADDR_LE_RANDOM))
|
||||
|
||||
/* These LE scan and inquiry parameters were chosen according to the LE General
|
||||
* Discovery Procedure specification.
|
||||
*/
|
||||
#define DISCOV_LE_SCAN_WIN 0x12
|
||||
#define DISCOV_LE_SCAN_INT 0x12
|
||||
#define DISCOV_LE_TIMEOUT msecs_to_jiffies(10240)
|
||||
#define DISCOV_INTERLEAVED_TIMEOUT msecs_to_jiffies(5120)
|
||||
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
|
||||
#define DISCOV_BREDR_INQUIRY_LEN 0x08
|
||||
|
||||
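Assuming the usual 0.625 ms units for LE scan parameters, 0x12 = 18 gives an 11.25 ms scan window and an 11.25 ms scan interval (i.e. effectively continuous scanning), while the two timeouts above are simply 10.24 s and 5.12 s.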
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
|
||||
int mgmt_index_added(struct hci_dev *hdev);
|
||||
int mgmt_index_removed(struct hci_dev *hdev);
|
||||
|
@ -1169,10 +1170,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
|
|||
u8 ssp, u8 *eir, u16 eir_len);
|
||||
int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
|
||||
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
|
||||
int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
|
||||
int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
|
||||
int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
|
||||
int mgmt_interleaved_discovery(struct hci_dev *hdev);
|
||||
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
|
||||
int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
|
||||
bool mgmt_valid_hdev(struct hci_dev *hdev);
|
||||
|
@ -1212,11 +1210,6 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
|
|||
u16 latency, u16 to_multiplier);
|
||||
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
|
||||
__u8 ltk[16]);
|
||||
int hci_do_inquiry(struct hci_dev *hdev, u8 length);
|
||||
int hci_cancel_inquiry(struct hci_dev *hdev);
|
||||
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
|
||||
int timeout);
|
||||
int hci_cancel_le_scan(struct hci_dev *hdev);
|
||||
|
||||
u8 bdaddr_to_le(u8 bdaddr_type);
|
||||
|
||||
|
|
|
@ -242,7 +242,7 @@ struct l2cap_conn_rsp {
|
|||
#define L2CAP_CID_SIGNALING 0x0001
|
||||
#define L2CAP_CID_CONN_LESS 0x0002
|
||||
#define L2CAP_CID_A2MP 0x0003
|
||||
#define L2CAP_CID_LE_DATA 0x0004
|
||||
#define L2CAP_CID_ATT 0x0004
|
||||
#define L2CAP_CID_LE_SIGNALING 0x0005
|
||||
#define L2CAP_CID_SMP 0x0006
|
||||
#define L2CAP_CID_DYN_START 0x0040
|
||||
|
|
|
@ -188,6 +188,8 @@ struct ieee80211_channel {
|
|||
* when used with 802.11g (on the 2.4 GHz band); filled by the
|
||||
* core code when registering the wiphy.
|
||||
* @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode.
|
||||
* @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode
|
||||
* @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
|
||||
*/
|
||||
enum ieee80211_rate_flags {
|
||||
IEEE80211_RATE_SHORT_PREAMBLE = 1<<0,
|
||||
|
@ -195,6 +197,8 @@ enum ieee80211_rate_flags {
|
|||
IEEE80211_RATE_MANDATORY_B = 1<<2,
|
||||
IEEE80211_RATE_MANDATORY_G = 1<<3,
|
||||
IEEE80211_RATE_ERP_G = 1<<4,
|
||||
IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5,
|
||||
IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -432,6 +436,30 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
|
|||
const struct cfg80211_chan_def *chandef,
|
||||
u32 prohibited_flags);
|
||||
|
||||
/**
|
||||
* ieee80211_chandef_rate_flags - returns rate flags for a channel
|
||||
*
|
||||
* In some channel types, not all rates may be used - for example CCK
|
||||
* rates may not be used in 5/10 MHz channels.
|
||||
*
|
||||
* @chandef: channel definition for the channel
|
||||
*
|
||||
* Returns: rate flags which apply for this channel
|
||||
*/
|
||||
static inline enum ieee80211_rate_flags
|
||||
ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
|
||||
{
|
||||
switch (chandef->width) {
|
||||
case NL80211_CHAN_WIDTH_5:
|
||||
return IEEE80211_RATE_SUPPORTS_5MHZ;
|
||||
case NL80211_CHAN_WIDTH_10:
|
||||
return IEEE80211_RATE_SUPPORTS_10MHZ;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
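A hedged sketch of how a rate loop could consume the new helper; the function name and the loop body are assumptions, only ieee80211_chandef_rate_flags() and the IEEE80211_RATE_SUPPORTS_* flags come from this patch:

/* Skip bitrates that may not be used on a 5/10 MHz chandef (sketch). */
static void example_filter_rates(struct ieee80211_supported_band *sband,
				 struct cfg80211_chan_def *chandef)
{
	u32 required = ieee80211_chandef_rate_flags(chandef);
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		if ((sband->bitrates[i].flags & required) != required)
			continue;	/* rate unusable at this bandwidth */
		/* ... rate i is a candidate ... */
	}
}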
/**
|
||||
* enum survey_info_flags - survey information flags
|
||||
*
|
||||
|
@ -1431,7 +1459,8 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
|
|||
* This structure provides information needed to complete IEEE 802.11
|
||||
* authentication.
|
||||
*
|
||||
* @bss: The BSS to authenticate with.
|
||||
* @bss: The BSS to authenticate with, the callee must obtain a reference
|
||||
* to it if it needs to keep it.
|
||||
* @auth_type: Authentication type (algorithm)
|
||||
* @ie: Extra IEs to add to Authentication frame or %NULL
|
||||
* @ie_len: Length of ie buffer in octets
|
||||
|
@ -1469,11 +1498,10 @@ enum cfg80211_assoc_req_flags {
|
|||
*
|
||||
* This structure provides information needed to complete IEEE 802.11
|
||||
* (re)association.
|
||||
* @bss: The BSS to associate with. If the call is successful the driver
|
||||
* is given a reference that it must release, normally via a call to
|
||||
* cfg80211_send_rx_assoc(), or, if association timed out, with a
|
||||
* call to cfg80211_put_bss() (in addition to calling
|
||||
* cfg80211_send_assoc_timeout())
|
||||
* @bss: The BSS to associate with. If the call is successful the driver is
|
||||
* given a reference that it must give back to cfg80211_send_rx_assoc()
|
||||
* or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
|
||||
* association requests while already associating must be rejected.
|
||||
* @ie: Extra IEs to add to (Re)Association Request frame or %NULL
|
||||
* @ie_len: Length of ie buffer in octets
|
||||
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
|
||||
|
@ -2342,6 +2370,7 @@ struct cfg80211_ops {
|
|||
* responds to probe-requests in hardware.
|
||||
* @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
|
||||
* @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
|
||||
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
|
||||
*/
|
||||
enum wiphy_flags {
|
||||
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
|
||||
|
@ -2365,6 +2394,7 @@ enum wiphy_flags {
|
|||
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
|
||||
WIPHY_FLAG_OFFCHAN_TX = BIT(20),
|
||||
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
|
||||
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -3492,11 +3522,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
|
|||
/**
|
||||
* cfg80211_assoc_timeout - notification of timed out association
|
||||
* @dev: network device
|
||||
* @addr: The MAC address of the device with which the association timed out
|
||||
* @bss: The BSS entry with which association timed out.
|
||||
*
|
||||
* This function may sleep. The caller must hold the corresponding wdev's mutex.
|
||||
*/
|
||||
void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr);
|
||||
void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
|
||||
|
||||
/**
|
||||
* cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
|
||||
|
|
|
@ -305,6 +305,7 @@ enum ieee80211_rssi_event {
|
|||
* @basic_rates: bitmap of basic rates, each bit stands for an
|
||||
* index into the rate table configured by the driver in
|
||||
* the current band.
|
||||
* @beacon_rate: associated AP's beacon TX rate
|
||||
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
|
||||
* @bssid: The BSSID for this BSS
|
||||
* @enable_beacon: whether beaconing should be enabled or not
|
||||
|
@ -352,6 +353,7 @@ struct ieee80211_bss_conf {
|
|||
u32 sync_device_ts;
|
||||
u8 sync_dtim_count;
|
||||
u32 basic_rates;
|
||||
struct ieee80211_rate *beacon_rate;
|
||||
int mcast_rate[IEEE80211_NUM_BANDS];
|
||||
u16 ht_operation_mode;
|
||||
s32 cqm_rssi_thold;
|
||||
|
|
|
@ -2758,6 +2758,8 @@ enum nl80211_channel_type {
|
|||
* and %NL80211_ATTR_CENTER_FREQ2 attributes must be provided as well
|
||||
* @NL80211_CHAN_WIDTH_160: 160 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
|
||||
* attribute must be provided as well
|
||||
* @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel
|
||||
* @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel
|
||||
*/
|
||||
enum nl80211_chan_width {
|
||||
NL80211_CHAN_WIDTH_20_NOHT,
|
||||
|
@ -2766,6 +2768,8 @@ enum nl80211_chan_width {
|
|||
NL80211_CHAN_WIDTH_80,
|
||||
NL80211_CHAN_WIDTH_80P80,
|
||||
NL80211_CHAN_WIDTH_160,
|
||||
NL80211_CHAN_WIDTH_5,
|
||||
NL80211_CHAN_WIDTH_10,
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -597,7 +597,15 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
|
|||
struct hci_dev *hdev = req->hdev;
|
||||
u8 p;
|
||||
|
||||
/* Only send HCI_Delete_Stored_Link_Key if it is supported */
|
||||
/* Some Broadcom based Bluetooth controllers do not support the
|
||||
* Delete Stored Link Key command. They are clearly indicating its
|
||||
* absence in the bit mask of supported commands.
|
||||
*
|
||||
* Check the supported commands, and only if the command is marked
|
||||
* as supported send it. If not supported assume that the controller
|
||||
* does not have actual support for stored link keys which makes this
|
||||
* command redundant anyway.
|
||||
*/
|
||||
if (hdev->commands[6] & 0x80) {
|
||||
struct hci_cp_delete_stored_link_key cp;
|
||||
|
||||
|
@ -751,7 +759,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *p, *n;

@ -964,7 +972,7 @@ int hci_inquiry(void __user *arg)
hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
hci_inquiry_cache_flush(hdev);
do_inquiry = 1;
}
hci_dev_unlock(hdev);

@ -1201,8 +1209,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);

cancel_work_sync(&hdev->le_scan);

cancel_delayed_work(&hdev->power_off);

hci_req_cancel(hdev, ENODEV);

@ -1230,7 +1236,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
cancel_delayed_work_sync(&hdev->le_scan_disable);

hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock(hdev);

@ -1331,7 +1337,7 @@ int hci_dev_reset(__u16 dev)
skb_queue_purge(&hdev->cmd_q);

hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock(hdev);

@ -1991,80 +1997,59 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
struct hci_cp_le_set_scan_param cp;
if (status) {
BT_ERR("Failed to start inquiry: status %d", status);

memset(&cp, 0, sizeof(cp));
cp.type = param->type;
cp.interval = cpu_to_le16(param->interval);
cp.window = cpu_to_le16(param->window);

hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
return;
}
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_set_scan_enable cp;

memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_ENABLE;
cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
u16 window, int timeout)
{
long timeo = msecs_to_jiffies(3000);
struct le_scan_params param;
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
struct hci_request req;
struct hci_cp_inquiry cp;
int err;

BT_DBG("%s", hdev->name);

if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
return -EINPROGRESS;

param.type = type;
param.interval = interval;
param.window = window;

hci_req_lock(hdev);

err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
timeo);
if (!err)
err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

hci_req_unlock(hdev);

if (err < 0)
return err;

queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
timeout);

return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);

if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
return -EALREADY;

if (cancel_delayed_work(&hdev->le_scan_disable)) {
struct hci_cp_le_set_scan_enable cp;

/* Send HCI command to disable LE Scan */
memset(&cp, 0, sizeof(cp));
hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
if (status) {
BT_ERR("Failed to disable LE scanning: status %d", status);
return;
}

return 0;
switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
break;

case DISCOV_TYPE_INTERLEAVED:
hci_req_init(&req, hdev);

memset(&cp, 0, sizeof(cp));
memcpy(&cp.lap, lap, sizeof(cp.lap));
cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

hci_dev_lock(hdev);

hci_inquiry_cache_flush(hdev);

err = hci_req_run(&req, inquiry_complete);
if (err) {
BT_ERR("Inquiry request failed: err %d", err);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

hci_dev_unlock(hdev);
break;
}
}

static void le_scan_disable_work(struct work_struct *work)

@ -2072,46 +2057,20 @@ static void le_scan_disable_work(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
le_scan_disable.work);
struct hci_cp_le_set_scan_enable cp;
struct hci_request req;
int err;

BT_DBG("%s", hdev->name);

hci_req_init(&req, hdev);

memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_DISABLE;
hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
struct le_scan_params *param = &hdev->le_scan_params;

BT_DBG("%s", hdev->name);

hci_do_le_scan(hdev, param->type, param->interval, param->window,
param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
int timeout)
{
struct le_scan_params *param = &hdev->le_scan_params;

BT_DBG("%s", hdev->name);

if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
return -ENOTSUPP;

if (work_busy(&hdev->le_scan))
return -EINPROGRESS;

param->type = type;
param->interval = interval;
param->window = window;
param->timeout = timeout;

queue_work(system_long_wq, &hdev->le_scan);

return 0;
err = hci_req_run(&req, le_scan_disable_work_complete);
if (err)
BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
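The rework above drops the synchronous __hci_req_sync() LE-scan helpers in favour of the asynchronous hci_request API: commands are accumulated with hci_req_add() and submitted with hci_req_run(), whose completion callback receives the status of the request. A stripped-down sketch of that pattern, mirroring the DISCOV_TYPE_INTERLEAVED branch above (the example_* function names are illustrative):

static void example_inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("inquiry request failed: status %d", status);
}

static int example_start_inquiry(struct hci_dev *hdev)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	struct hci_request req;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
	hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	/* queues the request; the callback runs when the controller
	 * has answered or the request is aborted */
	return hci_req_run(&req, example_inquiry_complete);
}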
@ -2148,7 +2107,6 @@ struct hci_dev *hci_alloc_dev(void)
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->le_scan, le_scan_work);

INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

@ -3551,36 +3509,6 @@ static void hci_cmd_work(struct work_struct *work)
}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
struct hci_cp_inquiry cp;

BT_DBG("%s", hdev->name);

if (test_bit(HCI_INQUIRY, &hdev->flags))
return -EINPROGRESS;

inquiry_cache_flush(hdev);

memset(&cp, 0, sizeof(cp));
memcpy(&cp.lap, lap, sizeof(cp.lap));
cp.length = length;

return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);

if (!test_bit(HCI_INQUIRY, &hdev->flags))
return -EALREADY;

return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
switch (bdaddr_type) {

@ -40,21 +40,13 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)

BT_DBG("%s status 0x%2.2x", hdev->name, status);

if (status) {
hci_dev_lock(hdev);
mgmt_stop_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
if (status)
return;
}

clear_bit(HCI_INQUIRY, &hdev->flags);
smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);

hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);

hci_conn_check_pending(hdev);
}
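The smp_mb__after_clear_bit() retained above pairs the waker with sleepers: clear_bit() is not a full memory barrier, so the explicit barrier makes the cleared HCI_INQUIRY bit visible before wake_up_bit() looks for waiters. The sleeping side of that pattern, as used by hci_inquiry() in this kernel generation, looks roughly like the sketch below (the example_* names are illustrative; wait_on_bit() here still takes an action callback):

static int example_wait_action(void *word)
{
	schedule();
	return signal_pending(current);
}

static int example_wait_inquiry_done(struct hci_dev *hdev)
{
	/* sleeps until HCI_INQUIRY is cleared; wake_up_bit() on the
	 * other side makes the waiter re-check the bit */
	return wait_on_bit(&hdev->flags, HCI_INQUIRY,
			   example_wait_action, TASK_INTERRUPTIBLE);
}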
@ -937,20 +929,6 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);

BT_DBG("%s status 0x%2.2x", hdev->name, status);

if (status) {
hci_dev_lock(hdev);
mgmt_start_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
{

@ -963,41 +941,16 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;

if (status)
return;

switch (cp->enable) {
case LE_SCAN_ENABLE:
if (status) {
hci_dev_lock(hdev);
mgmt_start_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}

set_bit(HCI_LE_SCAN, &hdev->dev_flags);

hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;

case LE_SCAN_DISABLE:
if (status) {
hci_dev_lock(hdev);
mgmt_stop_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}

clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
}

break;

default:

@ -1077,18 +1030,10 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)

if (status) {
hci_conn_check_pending(hdev);
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_start_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}

set_bit(HCI_INQUIRY, &hdev->flags);

hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)

@ -2298,10 +2243,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cc_user_passkey_neg_reply(hdev, skb);
break;

case HCI_OP_LE_SET_SCAN_PARAM:
hci_cc_le_set_scan_param(hdev, skb);
break;

case HCI_OP_LE_SET_ADV_ENABLE:
hci_cc_le_set_adv_enable(hdev, skb);
break;

@ -2670,7 +2611,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)

BT_DBG("%s", hdev->name);

if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
return;

hci_dev_lock(hdev);

@ -2746,7 +2687,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_drop(conn);
}

if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
if (test_bit(HCI_MGMT, &hdev->dev_flags))
hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
ev->key_type, pin_len);

@ -76,25 +76,19 @@ static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo
ci->flags = session->flags;
ci->state = BT_CONNECTED;

ci->vendor = 0x0000;
ci->product = 0x0000;
ci->version = 0x0000;

if (session->input) {
ci->vendor = session->input->id.vendor;
ci->product = session->input->id.product;
ci->version = session->input->id.version;
if (session->input->name)
strncpy(ci->name, session->input->name, 128);
strlcpy(ci->name, session->input->name, 128);
else
strncpy(ci->name, "HID Boot Device", 128);
}

if (session->hid) {
strlcpy(ci->name, "HID Boot Device", 128);
} else if (session->hid) {
ci->vendor = session->hid->vendor;
ci->product = session->hid->product;
ci->version = session->hid->version;
strncpy(ci->name, session->hid->name, 128);
strlcpy(ci->name, session->hid->name, 128);
}
}
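The strncpy() to strlcpy() conversions above are a correctness fix, not just style: strncpy() leaves the destination unterminated when the source is at least as long as the buffer, while strlcpy() copies at most size - 1 bytes, always NUL-terminates, and returns the length of the source so truncation can be detected. A tiny hedged sketch of using that return value (the helper name is illustrative):

/* Sketch: copy a name and report whether it was truncated. */
static bool copy_name(char *dst, const char *src, size_t size)
{
	return strlcpy(dst, src, size) >= size;	/* true = truncated */
}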
@ -504,8 +504,10 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
if (conn->hcon->type == LE_LINK) {
/* LE connection */
chan->omtu = L2CAP_DEFAULT_MTU;
chan->scid = L2CAP_CID_LE_DATA;
chan->dcid = L2CAP_CID_LE_DATA;
if (chan->dcid == L2CAP_CID_ATT)
chan->scid = L2CAP_CID_ATT;
else
chan->scid = l2cap_alloc_cid(conn);
} else {
/* Alloc CID for connection-oriented socket */
chan->scid = l2cap_alloc_cid(conn);

@ -543,6 +545,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)

l2cap_chan_hold(chan);

hci_conn_hold(conn->hcon);

list_add(&chan->list, &conn->chan_l);
}

@ -1338,17 +1342,21 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,

static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
struct sock *parent, *sk;
struct sock *parent;
struct l2cap_chan *chan, *pchan;

BT_DBG("");

/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
conn->src, conn->dst);
if (!pchan)
return;

/* Client ATT sockets should override the server one */
if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
return;

parent = pchan->sk;

lock_sock(parent);

@ -1357,17 +1365,12 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
if (!chan)
goto clean;

sk = chan->sk;
chan->dcid = L2CAP_CID_ATT;

hci_conn_hold(conn->hcon);
conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
bacpy(&bt_sk(chan->sk)->src, conn->src);
bacpy(&bt_sk(chan->sk)->dst, conn->dst);

bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);

l2cap_chan_add(conn, chan);

l2cap_chan_ready(chan);
__l2cap_chan_add(conn, chan);

clean:
release_sock(parent);

@ -1380,14 +1383,17 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)

BT_DBG("conn %p", conn);

if (!hcon->out && hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);

/* For outgoing pairing which doesn't necessarily have an
* associated socket (e.g. mgmt_pair_device).
*/
if (hcon->out && hcon->type == LE_LINK)
smp_conn_security(hcon, hcon->pending_sec_level);

mutex_lock(&conn->chan_lock);

if (hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);

list_for_each_entry(chan, &conn->chan_l, list) {

l2cap_chan_lock(chan);

@ -1792,7 +1798,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,

auth_type = l2cap_get_auth_type(chan);

if (chan->dcid == L2CAP_CID_LE_DATA)
if (bdaddr_type_is_le(dst_type))
hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
chan->sec_level, auth_type);
else

@ -1811,16 +1817,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}

if (hcon->type == LE_LINK) {
err = 0;

if (!list_empty(&conn->chan_l)) {
err = -EBUSY;
hci_conn_drop(hcon);
}

if (err)
goto done;
if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
hci_conn_drop(hcon);
err = -EBUSY;
goto done;
}

/* Update source addr of the socket */

@ -1830,6 +1830,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
l2cap_chan_add(conn, chan);
l2cap_chan_lock(chan);

/* l2cap_chan_add takes its own ref so we can drop this one */
hci_conn_drop(hcon);

l2cap_state_change(chan, BT_CONNECT);
__set_chan_timer(chan, sk->sk_sndtimeo);

@ -3751,8 +3754,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,

sk = chan->sk;

hci_conn_hold(conn->hcon);

bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
chan->psm = psm;

@ -4333,7 +4334,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
u16 type, result;

if (cmd_len != sizeof(*rsp))
if (cmd_len < sizeof(*rsp))
return -EPROTO;

type = __le16_to_cpu(rsp->type);

@ -5292,6 +5293,51 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
}
}

static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{
u8 *data = skb->data;
int len = skb->len;
struct l2cap_cmd_hdr cmd;
int err;

l2cap_raw_recv(conn, skb);

while (len >= L2CAP_CMD_HDR_SIZE) {
u16 cmd_len;
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
data += L2CAP_CMD_HDR_SIZE;
len -= L2CAP_CMD_HDR_SIZE;

cmd_len = le16_to_cpu(cmd.len);

BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
cmd.ident);

if (cmd_len > len || !cmd.ident) {
BT_DBG("corrupted command");
break;
}

err = l2cap_le_sig_cmd(conn, &cmd, data);
if (err) {
struct l2cap_cmd_rej_unk rej;

BT_ERR("Wrong link type (%d)", err);

/* FIXME: Map err to a valid reason */
rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
sizeof(rej), &rej);
}

data += cmd_len;
len -= cmd_len;
}

kfree_skb(skb);
}

static inline void l2cap_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{

@ -5318,11 +5364,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
break;
}

if (conn->hcon->type == LE_LINK)
err = l2cap_le_sig_cmd(conn, &cmd, data);
else
err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
if (err) {
struct l2cap_cmd_rej_unk rej;

@ -6356,16 +6398,13 @@ static void l2cap_att_channel(struct l2cap_conn *conn,
{
struct l2cap_chan *chan;

chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA,
chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
conn->src, conn->dst);
if (!chan)
goto drop;

BT_DBG("chan %p, len %d", chan, skb->len);

if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;

if (chan->imtu < skb->len)
goto drop;

@ -6395,6 +6434,8 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)

switch (cid) {
case L2CAP_CID_LE_SIGNALING:
l2cap_le_sig_channel(conn, skb);
break;
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
break;

@ -6405,7 +6446,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
l2cap_conless_channel(conn, psm, skb);
break;

case L2CAP_CID_LE_DATA:
case L2CAP_CID_ATT:
l2cap_att_channel(conn, skb);
break;
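Every L2CAP_CID_LE_DATA to L2CAP_CID_ATT change in this series renames the same fixed channel: CID 0x0004 is reserved for the Attribute Protocol, so the new name matches what the channel actually carries. For reference, the fixed CIDs touched by these hunks, with their spec-defined values (listed here only as a sketch; the authoritative definitions live in the kernel's l2cap.h):

#define L2CAP_CID_SIGNALING	0x0001	/* BR/EDR signaling channel */
#define L2CAP_CID_CONN_LESS	0x0002	/* connectionless data */
#define L2CAP_CID_ATT		0x0004	/* Attribute Protocol (formerly
					 * named L2CAP_CID_LE_DATA here) */
#define L2CAP_CID_LE_SIGNALING	0x0005	/* LE signaling channel */
#define L2CAP_CID_SMP		0x0006	/* Security Manager Protocol */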
@ -6531,7 +6572,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
continue;
}

if (chan->scid == L2CAP_CID_LE_DATA) {
if (chan->scid == L2CAP_CID_ATT) {
if (!status && encrypt) {
chan->sec_level = hcon->sec_level;
l2cap_chan_ready(chan);

@ -466,7 +466,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
{
switch (chan->scid) {
case L2CAP_CID_LE_DATA:
case L2CAP_CID_ATT:
if (mtu < L2CAP_LE_MIN_MTU)
return false;
break;

@ -630,7 +630,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
conn = chan->conn;

/*change security for LE channels */
if (chan->scid == L2CAP_CID_LE_DATA) {
if (chan->scid == L2CAP_CID_ATT) {
if (!conn->hcon->out) {
err = -EINVAL;
break;

@ -102,18 +102,6 @@ static const u16 mgmt_events[] = {
MGMT_EV_PASSKEY_NOTIFY,
};

/*
* These LE scan and inquiry parameters were chosen according to LE General
* Discovery Procedure specification.
*/
#define LE_SCAN_WIN 0x12
#define LE_SCAN_INT 0x12
#define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
#define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)

#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */

#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \

@ -1748,8 +1736,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,

hci_link_keys_clear(hdev);

set_bit(HCI_LINK_KEYS, &hdev->dev_flags);

if (cp->debug_keys)
set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
else

@ -2633,28 +2619,72 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
return err;
}

int mgmt_interleaved_discovery(struct hci_dev *hdev)
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
u8 type;
int err;

BT_DBG("%s", hdev->name);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

hci_dev_lock(hdev);
cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
if (!cmd)
return -ENOENT;

err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
if (err < 0)
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
type = hdev->discovery.type;

hci_dev_unlock(hdev);
err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
&type, sizeof(type));
mgmt_pending_remove(cmd);

return err;
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
BT_DBG("status %d", status);

if (status) {
hci_dev_lock(hdev);
mgmt_start_discovery_failed(hdev, status);
hci_dev_unlock(hdev);
return;
}

hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);

switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
DISCOV_LE_TIMEOUT);
break;

case DISCOV_TYPE_INTERLEAVED:
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
DISCOV_INTERLEAVED_TIMEOUT);
break;

case DISCOV_TYPE_BREDR:
break;

default:
BT_ERR("Invalid discovery type %d", hdev->discovery.type);
}
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_start_discovery *cp = data;
struct pending_cmd *cmd;
struct hci_cp_le_set_scan_param param_cp;
struct hci_cp_le_set_scan_enable enable_cp;
struct hci_cp_inquiry inq_cp;
struct hci_request req;
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
int err;

BT_DBG("%s", hdev->name);

@ -2687,6 +2717,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,

hdev->discovery.type = cp->type;

hci_req_init(&req, hdev);

switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
if (!lmp_bredr_capable(hdev)) {

@ -2696,10 +2728,23 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}

err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
if (test_bit(HCI_INQUIRY, &hdev->flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
mgmt_pending_remove(cmd);
goto failed;
}

hci_inquiry_cache_flush(hdev);

memset(&inq_cp, 0, sizeof(inq_cp));
memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
break;

case DISCOV_TYPE_LE:
case DISCOV_TYPE_INTERLEAVED:
if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_NOT_SUPPORTED);

@ -2707,20 +2752,40 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}

err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
break;

case DISCOV_TYPE_INTERLEAVED:
if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
!lmp_bredr_capable(hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_NOT_SUPPORTED);
mgmt_pending_remove(cmd);
goto failed;
}

err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_REJECTED);
mgmt_pending_remove(cmd);
goto failed;
}

if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
mgmt_pending_remove(cmd);
goto failed;
}

memset(&param_cp, 0, sizeof(param_cp));
param_cp.type = LE_SCAN_ACTIVE;
param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
&param_cp);

memset(&enable_cp, 0, sizeof(enable_cp));
enable_cp.enable = LE_SCAN_ENABLE;
enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
&enable_cp);
break;

default:
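The removed LE_SCAN_INT/LE_SCAN_WIN defines (0x12) and the DISCOV_LE_SCAN_INT/WIN values queued above are expressed in the controller's native LE scan units of 0.625 ms, and cpu_to_le16() is needed because HCI command parameters are little-endian on the wire. A quick worked conversion, as a sketch (the helper name is illustrative):

/* Sketch: LE scan interval/window units -> microseconds. */
static unsigned int le_scan_units_to_us(u16 units)
{
	return units * 625;	/* 0x12 -> 11250 us = 11.25 ms */
}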
@ -2730,6 +2795,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}

err = hci_req_run(&req, start_discovery_complete);
if (err < 0)
mgmt_pending_remove(cmd);
else

@ -2740,6 +2806,39 @@ failed:
return err;
}

static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
int err;

cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
if (!cmd)
return -ENOENT;

err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
&hdev->discovery.type, sizeof(hdev->discovery.type));
mgmt_pending_remove(cmd);

return err;
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
BT_DBG("status %d", status);

hci_dev_lock(hdev);

if (status) {
mgmt_stop_discovery_failed(hdev, status);
goto unlock;
}

hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

unlock:
hci_dev_unlock(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{

@ -2747,6 +2846,8 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
struct pending_cmd *cmd;
struct hci_cp_remote_name_req_cancel cp;
struct inquiry_entry *e;
struct hci_request req;
struct hci_cp_le_set_scan_enable enable_cp;
int err;

BT_DBG("%s", hdev->name);

@ -2773,12 +2874,20 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}

hci_req_init(&req, hdev);

switch (hdev->discovery.state) {
case DISCOVERY_FINDING:
if (test_bit(HCI_INQUIRY, &hdev->flags))
err = hci_cancel_inquiry(hdev);
else
err = hci_cancel_le_scan(hdev);
if (test_bit(HCI_INQUIRY, &hdev->flags)) {
hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
} else {
cancel_delayed_work(&hdev->le_scan_disable);

memset(&enable_cp, 0, sizeof(enable_cp));
enable_cp.enable = LE_SCAN_DISABLE;
hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
sizeof(enable_cp), &enable_cp);
}

break;

@ -2796,16 +2905,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
}

bacpy(&cp.bdaddr, &e->data.bdaddr);
err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
sizeof(cp), &cp);
hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
&cp);

break;

default:
BT_DBG("unknown discovery state %u", hdev->discovery.state);
err = -EFAULT;

mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
MGMT_STATUS_FAILED, &mgmt_cp->type,
sizeof(mgmt_cp->type));
goto unlock;
}

err = hci_req_run(&req, stop_discovery_complete);
if (err < 0)
mgmt_pending_remove(cmd);
else

@ -4063,6 +4178,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_device_found *ev = (void *) buf;
size_t ev_size;

if (!hci_discovery_active(hdev))
return -EPERM;

/* Leave 5 bytes for a potential CoD field */
if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
return -EINVAL;

@ -4114,43 +4232,6 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
sizeof(*ev) + eir_len, NULL);
}

int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
u8 type;
int err;

hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
if (!cmd)
return -ENOENT;

type = hdev->discovery.type;

err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
&type, sizeof(type));
mgmt_pending_remove(cmd);

return err;
}

int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
int err;

cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
if (!cmd)
return -ENOENT;

err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
&hdev->discovery.type, sizeof(hdev->discovery.type));
mgmt_pending_remove(cmd);

return err;
}

int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
struct mgmt_ev_discovering ev;

@ -2827,7 +2827,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
!rcu_access_pointer(sdata->bss->beacon))
need_offchan = true;
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED)
break;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->da);

@ -2930,19 +2931,8 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
u16 frame_type, bool reg)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);

switch (frame_type) {
case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH:
if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;

if (reg)
ifibss->auth_frame_registrations++;
else
ifibss->auth_frame_registrations--;
}
break;
case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
if (reg)
local->probe_req_reg++;