Merge tag 'wireless-drivers-next-for-davem-2015-06-03' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
new driver mt7601u for MediaTek MT7601U Wi-Fi devices

ath10k:

* qca6174 power consumption improvements, e.g. enabling ASPM (Michal)

wil6210:

* support Wi-Fi Simple Configuration in STA mode

iwlwifi:

* a few fixes (re-enablement of interrupts for certain new
  platforms that have special power states)
* completely rework the RBD allocation model for new
  multi-RX hardware
* cleanups
* continuation of the scan reworks (Luca)

mwifiex:

* improve firmware debug functionality

rtlwifi:

* update regulatory database

brcmfmac:

* cleanup and new feature support in PCIe code
* alternative nvram loading for router support
====================

Conflicts:
	drivers/net/wireless/iwlwifi/Kconfig

Trivial conflict in the iwlwifi Kconfig: two commits added the
same two chip numbers to the help text, but in transposed order.

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2015-06-03 23:44:57 -07:00
Parents: 22793e5d5f 1690faef64
Commit: 9d1dabfbd0
155 changed files: 13321 additions and 3379 deletions

@ -6372,6 +6372,12 @@ F: include/uapi/linux/meye.h
F: include/uapi/linux/ivtv*
F: include/uapi/linux/uvcvideo.h
MEDIATEK MT7601U WIRELESS LAN DRIVER
M: Jakub Kicinski <kubakici@wp.pl>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
MEGARAID SCSI/SAS DRIVERS
M: Kashyap Desai <kashyap.desai@avagotech.com>
M: Sumit Saxena <sumit.saxena@avagotech.com>


@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"


@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_WL_MEDIATEK) += mediatek/
obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH_CARDS) += ath/


@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
switch (cline) {
case 0x8: reg |= (0x1 << 14);
break;
case 0x16: reg |= (0x2 << 14);
break;
case 0x32: reg |= (0x3 << 14);
break;
default: reg |= (0x0 << 14);
break;
case 0x8:
reg |= (0x1 << 14);
break;
case 0x10:
reg |= (0x2 << 14);
break;
case 0x20:
reg |= (0x3 << 14);
break;
default:
reg |= (0x0 << 14);
break;
}
}
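
Context for the fix above: PCI_CACHE_LINE_SIZE is encoded in 32-bit dwords, so the meaningful values are 0x8 (32-byte line), 0x10 (64-byte) and 0x20 (128-byte); the old 0x16/0x32 cases were decimal sizes mistyped as hex. A minimal sketch of the corrected mapping (illustrative helper, not the driver's actual code):

/* Illustrative only: map a PCI cache line size (in dwords, as read from
 * PCI_CACHE_LINE_SIZE) to the two-bit field at bits 15:14 used above.
 */
static u32 cline_to_reg_bits(u8 cline)
{
	switch (cline) {
	case 0x08:	/* 8 dwords  = 32-byte cache line */
		return 0x1 << 14;
	case 0x10:	/* 16 dwords = 64-byte cache line */
		return 0x2 << 14;
	case 0x20:	/* 32 dwords = 128-byte cache line */
		return 0x3 << 14;
	default:	/* unknown size: leave the field clear */
		return 0x0 << 14;
	}
}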


@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (!skip_otp && result != 0) {
if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
ar->fw_features))
&& result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}


@ -460,6 +460,14 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
/* Don't trust error code from otp.bin */
ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
/* Some firmware revisions pad 4th hw address to 4 byte boundary making
* it 8 bytes long in Native Wifi Rx decap.
*/
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};


@ -36,6 +36,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_PCI_PS = 0x00004000,
ATH10K_DBG_ANY = 0xffffffff,
};


@ -965,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar,
ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
int len = ieee80211_hdrlen(hdr->frame_control);
if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
ar->fw_features))
len = round_up(len, 4);
return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
@ -1067,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* pull decapped header and copy SA & DA */
hdr = (struct ieee80211_hdr *)msdu->data;
hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, hdr_len);
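
To make the quirk concrete: ieee80211_hdrlen() returns 30 bytes for a 4-address Native Wifi data header, which firmware normally pads to 32; firmware flagged with ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING leaves it at 30. A standalone sketch of the arithmetic (values assumed for illustration only):

/* Illustration: header length with and without the 4-byte padding. */
#include <stdio.h>

#define ROUND_UP_4(x) (((x) + 3) & ~3)

int main(void)
{
	int hdrlen = 30; /* 4-address data header: 2+2+6+6+6+2+6 bytes */

	printf("padded:   %d\n", ROUND_UP_4(hdrlen)); /* 32 */
	printf("unpadded: %d\n", hdrlen);             /* 30 */
	return 0;
}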


@ -1708,7 +1708,14 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enable_ps = false;
}
if (enable_ps) {
if (!arvif->is_started) {
/* mac80211 can update vif powersave state while disconnected.
* Firmware doesn't behave nicely and consumes more power than
* necessary if PS is disabled on a non-started vdev. Hence
* force-enable PS for non-running vdevs.
*/
psmode = WMI_STA_PS_MODE_ENABLED;
} else if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;


@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
RTC_STATE_ADDRESS);
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
static void __ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
lockdep_assert_held(&ar_pci->ps_lock);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
iowrite32(PCIE_SOC_WAKE_V_MASK,
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS);
}
static void __ath10k_pci_sleep(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
lockdep_assert_held(&ar_pci->ps_lock);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
iowrite32(PCIE_SOC_WAKE_RESET,
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS);
ar_pci->ps_awake = false;
}
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
int tot_delay = 0;
int curr_delay = 5;
while (tot_delay < PCIE_WAKE_TIMEOUT) {
if (ath10k_pci_is_awake(ar))
return 0;
udelay(curr_delay);
tot_delay += curr_delay;
if (curr_delay < 50)
curr_delay += 5;
}
return -ETIMEDOUT;
}
static int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
/* This function can be called very frequently. To avoid excessive
* CPU stalls for MMIO reads use a cache var to hold the device state.
*/
if (!ar_pci->ps_awake) {
__ath10k_pci_wake(ar);
ret = ath10k_pci_wake_wait(ar);
if (ret == 0)
ar_pci->ps_awake = true;
}
if (ret == 0) {
ar_pci->ps_wake_refcount++;
WARN_ON(ar_pci->ps_wake_refcount == 0);
}
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
return ret;
}
static void ath10k_pci_sleep(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
if (WARN_ON(ar_pci->ps_wake_refcount == 0))
goto skip;
ar_pci->ps_wake_refcount--;
mod_timer(&ar_pci->ps_timer, jiffies +
msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
skip:
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_ps_timer(unsigned long ptr)
{
struct ath10k *ar = (void *)ptr;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
if (ar_pci->ps_wake_refcount > 0)
goto skip;
__ath10k_pci_sleep(ar);
skip:
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
del_timer_sync(&ar_pci->ps_timer);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
__ath10k_pci_sleep(ar);
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
value, offset, ret);
return;
}
iowrite32(value, ar_pci->mem + offset);
ath10k_pci_sleep(ar);
}
u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 val;
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
offset, ret);
return 0xffffffff;
}
val = ioread32(ar_pci->mem + offset);
ath10k_pci_sleep(ar);
return val;
}
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@ -793,60 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
int tot_delay = 0;
int curr_delay = 5;
while (tot_delay < PCIE_WAKE_TIMEOUT) {
if (ath10k_pci_is_awake(ar))
return 0;
udelay(curr_delay);
tot_delay += curr_delay;
if (curr_delay < 50)
curr_delay += 5;
}
return -ETIMEDOUT;
}
/* The rule is host is forbidden from accessing device registers while it's
* asleep. Currently ath10k_pci_wake() and ath10k_pci_sleep() calls aren't
* balanced and the device is kept awake all the time. This is intended for a
* simpler solution for the following problems:
*
* * device can enter sleep during s2ram without the host knowing,
*
* * irq handlers access registers which is a problem if other device asserts
* a shared irq line when ath10k is between hif_power_down() and
* hif_power_up().
*
* FIXME: If power consumption is a concern (and there are *real* gains) then a
* refcounted wake/sleep needs to be implemented.
*/
static int ath10k_pci_wake(struct ath10k *ar)
{
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
return ath10k_pci_wake_wait(ar);
}
static void ath10k_pci_sleep(struct ath10k *ar)
{
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_RESET);
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
@ -1227,11 +1372,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
static int ath10k_pci_hif_start(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
ar_pci->link_ctl);
return 0;
}
@ -1344,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar)
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long flags;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
/* Most likely the device has HTT Rx ring configured. The only way to
@ -1362,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@ -1981,15 +2137,15 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake up target: %d\n", ret);
return ret;
}
pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
&ar_pci->link_ctl);
pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
/*
* Bring the target up cleanly.
@ -2037,7 +2193,6 @@ err_ce:
ath10k_pci_ce_deinit(ar);
err_sleep:
ath10k_pci_sleep(ar);
return ret;
}
@ -2054,7 +2209,12 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
ath10k_pci_sleep(ar);
/* The grace timer can still be counting down and ar->ps_awake be true.
* It is known that the device may be asleep after resuming regardless
* of the SoC powersave state before suspending. Hence make sure the
* device is asleep before proceeding.
*/
ath10k_pci_sleep_sync(ar);
return 0;
}
@ -2064,13 +2224,6 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake device up on resume: %d\n", ret);
return ret;
}
/* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@ -2081,7 +2234,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
return ret;
return 0;
}
#endif
@ -2175,13 +2328,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
struct ath10k *ar = arg;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
return IRQ_NONE;
}
if (ar_pci->num_msi_intrs == 0) {
if (!ath10k_pci_irq_pending(ar))
@ -2502,7 +2648,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 lcr_val;
int ret;
pci_set_drvdata(pdev, ar);
@ -2536,10 +2681,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
/* Workaround: Disable ASPM */
pci_read_config_dword(pdev, 0x80, &lcr_val);
pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
/* Arrange for access to Target SoC registers. */
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
@ -2633,8 +2774,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pdev->subsystem_vendor, pdev->subsystem_device);
spin_lock_init(&ar_pci->ce_lock);
spin_lock_init(&ar_pci->ps_lock);
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
(unsigned long)ar);
ret = ath10k_pci_claim(ar);
if (ret) {
@ -2642,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_core_destroy;
}
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake up: %d\n", ret);
goto err_release;
}
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@ -2711,9 +2850,6 @@ err_free_pipes:
ath10k_pci_free_pipes(ar);
err_sleep:
ath10k_pci_sleep(ar);
err_release:
ath10k_pci_release(ar);
err_core_destroy:
@ -2743,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}


@ -185,6 +185,41 @@ struct ath10k_pci {
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
/* Due to HW quirks it is recommended to disable ASPM during device
* bootup. To do that the original PCI-E Link Control is stored before
* device bootup is executed and re-programmed later.
*/
u16 link_ctl;
/* Protects ps_awake and ps_wake_refcount */
spinlock_t ps_lock;
/* The device has a special powersave-oriented register. When device is
* considered asleep it drains less power and driver is forbidden from
* accessing most MMIO registers. If host were to access them without
* waking up the device might scribble over host memory or return
* 0xdeadbeef readouts.
*/
unsigned long ps_wake_refcount;
/* Waking up takes some time (up to 2ms in some cases) so it can be bad
* for latency. To mitigate this the device isn't immediately allowed
* to sleep after all references are undone - instead there's a grace
* period after which the powersave register is updated unless some
* activity to/from device happened in the meantime.
*
* Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
*/
struct timer_list ps_timer;
/* MMIO registers are used to communicate with the device. With
* intensive traffic accessing powersave register would be a bit
* wasteful overhead and would needlessly stall CPU. It is far more
* efficient to rely on a variable in RAM and update it only upon
* powersave register state changes.
*/
bool ps_awake;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@ -209,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/* Target exposes its registers for direct access. However before host can
* access them it needs to make sure the target is awake (ath10k_pci_wake,
* ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
* to sleep unless host tells it to (ath10k_pci_sleep).
*
* If host tries to access target registers without waking it up it can
* scribble over host memory.
*
* If target is asleep waking it up may take up to even 2ms.
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
* frequently. To avoid this put SoC to sleep after a very conservative grace
* period. Adjust with great care.
*/
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + offset);
}
static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
#endif /* _PCI_H_ */
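
Taken together, the fields above implement a refcounted wake with lazy sleep: the first reference pokes SOC_WAKE, dropping the last reference only arms the grace timer, and the timer callback clears SOC_WAKE if no new reference arrived in the meantime. A condensed sketch of the pattern (paraphrasing ath10k_pci_wake()/ath10k_pci_sleep() from the pci.c hunks above, not literal driver code):

/* Condensed sketch of the wake/sleep pattern, under ps_lock. */
spin_lock_irqsave(&ar_pci->ps_lock, flags);
if (!ar_pci->ps_awake) {			/* first user after sleep */
	__ath10k_pci_wake(ar);			/* write PCIE_SOC_WAKE_V_MASK */
	if (ath10k_pci_wake_wait(ar) == 0)
		ar_pci->ps_awake = true;
}
ar_pci->ps_wake_refcount++;
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

/* ... MMIO access while awake ... */

spin_lock_irqsave(&ar_pci->ps_lock, flags);
if (--ar_pci->ps_wake_refcount == 0)		/* last user: sleep lazily */
	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);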


@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
int ath10k_spectral_create(struct ath10k *ar)
{
/* The buffer size covers whole channels in dual bands up to 128 bins.
* Scan with bigger than 128 bins needs to be run on single band each.
*/
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
1024, 256,
1140, 2500,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,


@ -1645,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
survey = &ar->survey[idx];
survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->noise = noise_floor;
survey->filled = SURVEY_INFO_TIME |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_NOISE_DBM;
}


@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
return 0;
}
/**
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan:
*
* For non single-chip solutions. Converts to baseband spur frequency given the
* input channel frequency and compute register settings below.
*/
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan, int bin)
{
int bb_spur = AR_NO_SPUR;
int bin, cur_bin;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int cur_bin;
int upper, lower, cur_vit_mask;
int tmp, new;
int i;
static int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static int inc[4] = { 0, 100, 0, 0 };
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_amt;
int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
break;
cur_bb_spur = cur_bb_spur - (chan->channel * 10);
if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
bb_spur = cur_bb_spur;
break;
}
}
if (AR_NO_SPUR == bb_spur)
return;
bin = bb_spur * 32;
tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
AR_PHY_SPUR_REG_MASK_RATE_SELECT |
AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
REG_WRITE(ah, AR_PHY_SPUR_REG, new);
spur_delta_phase = ((bb_spur * 524288) / 100) &
AR_PHY_TIMING11_SPUR_DELTA_PHASE;
denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
REG_WRITE(ah, AR_PHY_TIMING11, new);
static const int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static const int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static const int inc[4] = { 0, 100, 0, 0 };
cur_bin = -6000;
upper = bin + 100;
@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
@ -466,6 +405,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
/**
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan:
*
* For non single-chip solutions. Converts to baseband spur frequency given the
* input channel frequency and compute register settings below.
*/
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int bb_spur = AR_NO_SPUR;
int bin;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int tmp, new;
int i;
int8_t mask_m[123];
int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
break;
cur_bb_spur = cur_bb_spur - (chan->channel * 10);
if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
bb_spur = cur_bb_spur;
break;
}
}
if (AR_NO_SPUR == bb_spur)
return;
bin = bb_spur * 32;
tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
AR_PHY_SPUR_REG_MASK_RATE_SELECT |
AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
REG_WRITE(ah, AR_PHY_SPUR_REG, new);
spur_delta_phase = ((bb_spur * 524288) / 100) &
AR_PHY_TIMING11_SPUR_DELTA_PHASE;
denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
REG_WRITE(ah, AR_PHY_TIMING11, new);
ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
}
/**
* ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
* @ah: atheros hardware structure


@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
{
int bb_spur = AR_NO_SPUR;
int freq;
int bin, cur_bin;
int bin;
int bb_spur_off, spur_subchannel_sd;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
int upper, lower, cur_vit_mask;
int tmp, newVal;
int i;
static const int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
static const int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
static const int inc[4] = { 0, 100, 0, 0 };
struct chan_centers centers;
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_amt;
int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
for (i = 0; i < 4; i++) {
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
chan_mask = chan_mask | 0x1 << bp;
}
cur_bin += 100;
}
cur_bin += inc[i];
REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
REG_WRITE(ah, chan_mask_reg[i], chan_mask);
}
cur_vit_mask = 6100;
upper = bin + 120;
lower = bin - 120;
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
if (tmp_v < 75)
mask_amt = 1;
else
mask_amt = 0;
if (cur_vit_mask < 0)
mask_m[abs(cur_vit_mask / 100)] = mask_amt;
else
mask_p[cur_vit_mask / 100] = mask_amt;
}
cur_vit_mask -= 100;
}
tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
| (mask_m[48] << 26) | (mask_m[49] << 24)
| (mask_m[50] << 22) | (mask_m[51] << 20)
| (mask_m[52] << 18) | (mask_m[53] << 16)
| (mask_m[54] << 14) | (mask_m[55] << 12)
| (mask_m[56] << 10) | (mask_m[57] << 8)
| (mask_m[58] << 6) | (mask_m[59] << 4)
| (mask_m[60] << 2) | (mask_m[61] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
tmp_mask = (mask_m[31] << 28)
| (mask_m[32] << 26) | (mask_m[33] << 24)
| (mask_m[34] << 22) | (mask_m[35] << 20)
| (mask_m[36] << 18) | (mask_m[37] << 16)
| (mask_m[48] << 14) | (mask_m[39] << 12)
| (mask_m[40] << 10) | (mask_m[41] << 8)
| (mask_m[42] << 6) | (mask_m[43] << 4)
| (mask_m[44] << 2) | (mask_m[45] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
| (mask_m[18] << 26) | (mask_m[18] << 24)
| (mask_m[20] << 22) | (mask_m[20] << 20)
| (mask_m[22] << 18) | (mask_m[22] << 16)
| (mask_m[24] << 14) | (mask_m[24] << 12)
| (mask_m[25] << 10) | (mask_m[26] << 8)
| (mask_m[27] << 6) | (mask_m[28] << 4)
| (mask_m[29] << 2) | (mask_m[30] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
| (mask_m[2] << 26) | (mask_m[3] << 24)
| (mask_m[4] << 22) | (mask_m[5] << 20)
| (mask_m[6] << 18) | (mask_m[7] << 16)
| (mask_m[8] << 14) | (mask_m[9] << 12)
| (mask_m[10] << 10) | (mask_m[11] << 8)
| (mask_m[12] << 6) | (mask_m[13] << 4)
| (mask_m[14] << 2) | (mask_m[15] << 0);
REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
tmp_mask = (mask_p[15] << 28)
| (mask_p[14] << 26) | (mask_p[13] << 24)
| (mask_p[12] << 22) | (mask_p[11] << 20)
| (mask_p[10] << 18) | (mask_p[9] << 16)
| (mask_p[8] << 14) | (mask_p[7] << 12)
| (mask_p[6] << 10) | (mask_p[5] << 8)
| (mask_p[4] << 6) | (mask_p[3] << 4)
| (mask_p[2] << 2) | (mask_p[1] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
tmp_mask = (mask_p[30] << 28)
| (mask_p[29] << 26) | (mask_p[28] << 24)
| (mask_p[27] << 22) | (mask_p[26] << 20)
| (mask_p[25] << 18) | (mask_p[24] << 16)
| (mask_p[23] << 14) | (mask_p[22] << 12)
| (mask_p[21] << 10) | (mask_p[20] << 8)
| (mask_p[19] << 6) | (mask_p[18] << 4)
| (mask_p[17] << 2) | (mask_p[16] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
tmp_mask = (mask_p[45] << 28)
| (mask_p[44] << 26) | (mask_p[43] << 24)
| (mask_p[42] << 22) | (mask_p[41] << 20)
| (mask_p[40] << 18) | (mask_p[39] << 16)
| (mask_p[38] << 14) | (mask_p[37] << 12)
| (mask_p[36] << 10) | (mask_p[35] << 8)
| (mask_p[34] << 6) | (mask_p[33] << 4)
| (mask_p[32] << 2) | (mask_p[31] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
| (mask_p[59] << 26) | (mask_p[58] << 24)
| (mask_p[57] << 22) | (mask_p[56] << 20)
| (mask_p[55] << 18) | (mask_p[54] << 16)
| (mask_p[53] << 14) | (mask_p[52] << 12)
| (mask_p[51] << 10) | (mask_p[50] << 8)
| (mask_p[49] << 6) | (mask_p[48] << 4)
| (mask_p[47] << 2) | (mask_p[46] << 0);
REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
REGWRITE_BUFFER_FLUSH(ah);
}


@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#define OP_BT_PRIORITY_DETECTED BIT(3)
#define OP_BT_SCAN BIT(4)
#define OP_TSF_RESET BIT(6)
#define OP_BT_PRIORITY_DETECTED 3
#define OP_BT_SCAN 4
#define OP_TSF_RESET 6
enum htc_op_flags {
HTC_FWFLAG_NO_RMW,
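
The OP_* defines drop their BIT() wrapping here because (as we read this hunk; the changelog is not quoted) they are now used as bit numbers with the atomic set_bit()/test_bit() helpers, which take a bit index rather than a precomputed mask. A sketch of that style:

/* Sketch: OP_* values as bit indices on an unsigned long bitmap. */
unsigned long op_flags = 0;

set_bit(OP_TSF_RESET, &op_flags);
if (test_bit(OP_BT_SCAN, &op_flags))
	; /* BT scan in progress */
clear_bit(OP_BT_PRIORITY_DETECTED, &op_flags);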


@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
struct ath9k_channel *chan);
void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan, int bin);
void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
struct ath9k_channel *chan, int ht40_delta);


@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
unsigned int plen, void *payload, unsigned int outlen, void *out)
{
int err = -ENOMEM;
unsigned long time_left;
if (!IS_ACCEPTING_CMD(ar))
return -EIO;
@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
if (err == 0) {
time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
if (time_left == 0) {
err = -ETIMEDOUT;
goto err_unbuf;
}


@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o


@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
if (sme->privacy && !rsn_eid) {
wil_err(wil, "Missing RSN IE for secure connection\n");
return -EINVAL;
}
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
@ -425,10 +422,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
wil->privacy = sme->privacy;
if (wil->privacy) {
/* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
rc = wmi_del_cipher_key(wil, 0, bss->bssid);
/* For secure assoc, remove old keys */
rc = wmi_del_cipher_key(wil, 0, bss->bssid,
WMI_KEY_USE_PAIRWISE);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
goto out;
}
rc = wmi_del_cipher_key(wil, 0, bss->bssid,
WMI_KEY_USE_RX_GROUP);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto out;
}
}
@ -458,11 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
if (wil->privacy) {
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
conn.pairwise_crypto_len = 16;
} else {
if (rsn_eid) { /* regular secure connection */
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
conn.pairwise_crypto_len = 16;
conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
conn.group_crypto_len = 16;
} else { /* WSC */
conn.dot11_auth_mode = WMI_AUTH11_WSC;
conn.auth_mode = WMI_AUTH_NONE;
}
} else { /* insecure connection */
conn.dot11_auth_mode = WMI_AUTH11_OPEN;
conn.auth_mode = WMI_AUTH_NONE;
}
@ -507,6 +518,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
return rc;
@ -561,6 +574,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
return 0;
}
static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
bool pairwise)
{
struct wireless_dev *wdev = wil->wdev;
enum wmi_key_usage rc;
static const char * const key_usage_str[] = {
[WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
[WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
[WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
};
if (pairwise) {
rc = WMI_KEY_USE_PAIRWISE;
} else {
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
rc = WMI_KEY_USE_RX_GROUP;
break;
case NL80211_IFTYPE_AP:
rc = WMI_KEY_USE_TX_GROUP;
break;
default:
/* TODO: Rx GTK or Tx GTK? */
wil_err(wil, "Can't determine GTK type\n");
rc = WMI_KEY_USE_RX_GROUP;
break;
}
}
wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
return rc;
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
@ -568,13 +614,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct key_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
/* group key is not used */
if (!pairwise)
return 0;
wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
pairwise ? "PTK" : "GTK");
return wmi_add_cipher_key(wil, key_index, mac_addr,
params->key_len, params->key);
return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
@ -583,12 +629,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
const u8 *mac_addr)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
/* group key is not used */
if (!pairwise)
return 0;
wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
pairwise ? "PTK" : "GTK");
return wmi_del_cipher_key(wil, key_index, mac_addr);
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
/* Need to be present or wiphy_new() will WARN */
@ -661,11 +707,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
if (bcon->probe_resp_len <= hlen)
return 0;
if (!bcon->proberesp_ies) {
bcon->proberesp_ies = f->u.probe_resp.variable;
bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
rc = 1;
}
if (!bcon->assocresp_ies) {
bcon->assocresp_ies = f->u.probe_resp.variable;
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
@ -680,9 +721,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
int rc;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
if (bcon->probe_resp_len > hlen) {
pr_ies = f->u.probe_resp.variable;
pr_ies_len = bcon->probe_resp_len - hlen;
}
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
@ -695,9 +746,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
bcon->proberesp_ies_len,
bcon->proberesp_ies);
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
return rc;
@ -725,6 +774,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
@ -744,6 +797,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
if (bcon->probe_resp_len > hlen) {
pr_ies = f->u.probe_resp.variable;
pr_ies_len = bcon->probe_resp_len - hlen;
}
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
@ -771,8 +829,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
bcon->proberesp_ies);
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
@ -814,13 +871,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wmi_pcp_stop(wil);
__wil_down(wil);
__wil_up(wil);
mutex_unlock(&wil->mutex);
/* some functions above might fail (e.g. __wil_up). Nevertheless, we
* return success because AP has stopped
*/
return 0;
}
@ -830,6 +883,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
params->reason_code);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -24,6 +24,7 @@
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "pmc.h"
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
@ -123,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
if (cid < WIL6210_MAX_CID)
seq_printf(s,
"\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
"\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
txdata->dot1x_open ? "+" : "-",
txdata->agg_wsize,
txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
else
seq_printf(s,
"\nBroadcast [%3d|%3d] idle %s\n",
"\nBroadcast 1x%s [%3d|%3d] idle %s\n",
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H');
@ -702,6 +705,89 @@ static const struct file_operations fops_back = {
.open = simple_open,
};
/* pmc control, write:
* - "alloc <num descriptors> <descriptor_size>" to allocate PMC
* - "free" to release memory allocated for PMC
*/
static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
char cmd[9];
int num_descs, desc_size;
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 1) {
wil_err(wil, "pmccfg: no params given\n");
return -EINVAL;
}
if (0 == strcmp(cmd, "alloc")) {
if (rc != 3) {
wil_err(wil, "pmccfg: alloc requires 2 params\n");
return -EINVAL;
}
wil_pmc_alloc(wil, num_descs, desc_size);
} else if (0 == strcmp(cmd, "free")) {
if (rc != 1) {
wil_err(wil, "pmccfg: free does not have any params\n");
return -EINVAL;
}
wil_pmc_free(wil, true);
} else {
wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
return -EINVAL;
}
return len;
}
static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
char text[256];
char help[] = "pmc control, write:\n"
" - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
" - \"free\" to free memory allocated for pmc\n";
sprintf(text, "Last command status: %d\n\n%s",
wil_pmc_last_cmd_status(wil),
help);
return simple_read_from_buffer(user_buf, count, ppos, text,
strlen(text) + 1);
}
static const struct file_operations fops_pmccfg = {
.read = wil_read_pmccfg,
.write = wil_write_pmccfg,
.open = simple_open,
};
static const struct file_operations fops_pmcdata = {
.open = simple_open,
.read = wil_pmc_read,
.llseek = wil_pmc_llseek,
};
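
As a usage illustration, a user-space tool could drive this interface roughly as follows (a sketch: the debugfs path and the ring dimensions are assumptions, not taken from the driver):

/* Hypothetical sketch: allocate a 256-descriptor x 2048-byte PMC ring,
 * read back the last command status, then free the ring. The debugfs
 * path is an assumption; adjust it to wherever the wil6210 debugfs
 * directory lives on your system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *ctl = "/sys/kernel/debug/wil6210/pmccfg"; /* assumed path */
	char status[256];
	ssize_t n;
	int fd = open(ctl, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "alloc 256 2048", 14) < 0)
		perror("alloc");
	lseek(fd, 0, SEEK_SET);
	n = read(fd, status, sizeof(status) - 1);
	if (n > 0) {
		status[n] = '\0';
		fputs(status, stdout); /* "Last command status: ..." + help */
	}
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "free", 4) < 0)
		perror("free");
	close(fd);
	return 0;
}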
/*---tx_mgmt---*/
/* Write mgmt frame to this file to send it */
static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@ -1111,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data)
status = "connected";
break;
}
seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
(p->data_port_open ? " data_port_open" : ""));
seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
rc = wil_cid_fill_sinfo(wil, i, &sinfo);
@ -1292,8 +1377,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
status = "connected";
break;
}
seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
(p->data_port_open ? " data_port_open" : ""));
seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@ -1363,6 +1447,8 @@ static const struct {
{"tx_mgmt", S_IWUSR, &fops_txmgmt},
{"wmi_send", S_IWUSR, &fops_wmi},
{"back", S_IRUGO | S_IWUSR, &fops_back},
{"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
{"pmcdata", S_IRUGO, &fops_pmcdata},
{"temp", S_IRUGO, &fops_temp},
{"freq", S_IRUGO, &fops_freq},
{"link", S_IRUGO, &fops_link},
@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
wil_pmc_init(wil);
wil6210_debugfs_init_files(wil, dbg);
wil6210_debugfs_init_isr(wil, dbg);
wil6210_debugfs_init_blobs(wil, dbg);
@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
*/
wil_pmc_free(wil, false);
}


@ -25,6 +25,10 @@
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@ -146,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
if (!from_event)
wmi_disconnect_sta(wil, sta->addr, reason_code);
@ -373,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil)
if (ri < 0)
return ri;
wil->bcast_vring = ri;
rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
if (rc == 0)
wil->bcast_vring = ri;
if (rc)
wil->bcast_vring = -1;
return rc;
}
@ -547,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
u32 x;
u32 x, x1 = 0;
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
@ -602,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
do {
msleep(RST_DELAY);
x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
x1 = x;
}
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
} while (!(x & BIT_BL_READY));
} while (x != BIT_BL_READY);
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@ -686,6 +694,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
if (debug_fw) {
static const u8 mac[ETH_ALEN] = {
0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
};
struct net_device *ndev = wil_to_ndev(wil);
ether_addr_copy(ndev->perm_addr, mac);
ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
return 0;
}
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);


@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev)
wil_dbg_misc(wil, "%s()\n", __func__);
if (debug_fw) {
wil_err(wil, "%s() while in debug_fw mode\n", __func__);
return -EINVAL;
}
return wil_up(wil);
}


@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi,
" Use MSI interrupt: "
"0 - don't, 1 - (default) - single, or 3");
static bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
mutex_lock(&wil->mutex);
rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex);
if (debug_fw)
rc = 0;
if (rc)
goto release_irq;


@ -0,0 +1,375 @@
/*
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"
struct desc_alloc_info {
dma_addr_t pa;
void *va;
};
static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
return !!pmc->pring_va;
}
void wil_pmc_init(struct wil6210_priv *wil)
{
memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
mutex_init(&wil->pmc.lock);
}
/**
* Allocate the physical ring (p-ring) and the required
* number of descriptors of required size.
* Initialize the descriptors as required by pmc dma.
* The descriptors' buffers dwords are initialized to hold
* dword's serial number in the lsw and reserved value
* PCM_DATA_INVALID_DW_VAL in the msw.
*/
void wil_pmc_alloc(struct wil6210_priv *wil,
int num_descriptors,
int descriptor_size)
{
u32 i;
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
mutex_lock(&pmc->lock);
if (wil_is_pmc_allocated(pmc)) {
/* sanity check */
wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
goto no_release_err;
}
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
__func__, num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context*/
pmc->descriptors = kcalloc(num_descriptors,
sizeof(struct desc_alloc_info),
GFP_KERNEL);
if (!pmc->descriptors) {
wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
goto no_release_err;
}
wil_dbg_misc(wil,
"%s: allocated descriptors info list %p\n",
__func__, pmc->descriptors);
/* Allocate pring buffer and descriptors.
* vring->va should be aligned on its size rounded up to power of 2
* This is granted by the dma_alloc_coherent
*/
pmc->pring_va = dma_alloc_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
&pmc->pring_pa,
GFP_KERNEL);
wil_dbg_misc(wil,
"%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
__func__,
pmc->pring_va, &pmc->pring_pa,
sizeof(struct vring_tx_desc),
num_descriptors,
sizeof(struct vring_tx_desc) * num_descriptors);
if (!pmc->pring_va) {
wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
goto release_pmc_skb_list;
}
/* initially, all descriptors are SW owned
* For Tx, Rx, and PMC, ownership bit is at the same location, thus
* we can use any
*/
for (i = 0; i < num_descriptors; i++) {
struct vring_tx_desc *_d = &pmc->pring_va[i];
struct vring_tx_desc dd, *d = &dd;
int j = 0;
pmc->descriptors[i].va = dma_alloc_coherent(dev,
descriptor_size,
&pmc->descriptors[i].pa,
GFP_KERNEL);
if (unlikely(!pmc->descriptors[i].va)) {
wil_err(wil,
"%s: ERROR allocating pmc descriptor %d",
__func__, i);
goto release_pmc_skbs;
}
for (j = 0; j < descriptor_size / sizeof(u32); j++) {
u32 *p = (u32 *)pmc->descriptors[i].va + j;
*p = PCM_DATA_INVALID_DW_VAL | j;
}
/* configure dma descriptor */
d->dma.addr.addr_low =
cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
d->dma.addr.addr_high =
cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
d->dma.status = 0; /* 0 = HW_OWNED */
d->dma.length = cpu_to_le16(descriptor_size);
d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
*_d = *d;
}
wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
pmc_cmd.op = WMI_PMC_ALLOCATE;
pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
pmc->last_cmd_status = wmi_send(wil,
WMI_PMC_CMDID,
&pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
"%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
__func__, pmc->last_cmd_status);
goto release_pmc_skbs;
}
mutex_unlock(&pmc->lock);
return;
release_pmc_skbs:
wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
dma_free_coherent(dev,
descriptor_size,
pmc->descriptors[i].va,
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
dma_free_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
pmc->pring_va,
pmc->pring_pa);
pmc->pring_va = NULL;
release_pmc_skb_list:
wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
__func__);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
no_release_err:
pmc->last_cmd_status = -ENOMEM;
mutex_unlock(&pmc->lock);
}
/**
* Traverse the p-ring and release all buffers.
* At the end release the p-ring memory
*/
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
mutex_lock(&pmc->lock);
pmc->last_cmd_status = 0;
if (!wil_is_pmc_allocated(pmc)) {
wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
__func__);
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return;
}
if (send_pmc_cmd) {
wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
__func__);
pmc_cmd.op = WMI_PMC_RELEASE;
pmc->last_cmd_status =
wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
"%s WMI_PMC_CMD with RELEASE op failed, status %d",
__func__, pmc->last_cmd_status);
/* There's nothing we can do with this error.
* Normally, it should never occur.
* Continue to freeing all memory allocated for pmc.
*/
}
}
if (pmc->pring_va) {
size_t buf_size = sizeof(struct vring_tx_desc) *
pmc->num_descriptors;
wil_dbg_misc(wil, "%s: free pring va %p\n",
__func__, pmc->pring_va);
dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc->pring_va = NULL;
} else {
pmc->last_cmd_status = -ENOENT;
}
if (pmc->descriptors) {
int i;
for (i = 0;
pmc->descriptors[i].va && i < pmc->num_descriptors; i++) {
dma_free_coherent(dev,
pmc->descriptor_size,
pmc->descriptors[i].va,
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
__func__, i, pmc->num_descriptors);
wil_dbg_misc(wil,
"%s: free pmc descriptors info list %p\n",
__func__, pmc->descriptors);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
} else {
pmc->last_cmd_status = -ENOENT;
}
mutex_unlock(&pmc->lock);
}
/**
* Status of the last operation requested via debugfs: alloc/free/read.
* 0 - success or negative errno
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s: status %d\n", __func__,
wil->pmc.last_cmd_status);
return wil->pmc.last_cmd_status;
}
/**
* Read from the requested position up to the end of the current descriptor,
* depending on the descriptor size configured during the alloc request.
*/
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t retval = 0;
unsigned long long idx;
loff_t offset;
size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
}
wil_dbg_misc(wil,
"%s: size %u, pos %lld\n",
__func__, (unsigned)count, *f_pos);
pmc->last_cmd_status = 0;
idx = *f_pos;
do_div(idx, pmc->descriptor_size);
offset = *f_pos - (idx * pmc->descriptor_size);
if (*f_pos >= pmc_size) {
wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
__func__, *f_pos, (unsigned)pmc_size);
pmc->last_cmd_status = -ERANGE;
goto out;
}
wil_dbg_misc(wil,
"%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
__func__, *f_pos, idx, offset, count);
/* if no errors, return the copied byte count */
retval = simple_read_from_buffer(buf,
count,
&offset,
pmc->descriptors[idx].va,
pmc->descriptor_size);
*f_pos += retval;
out:
mutex_unlock(&pmc->lock);
return retval;
}
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
loff_t newpos;
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
switch (whence) {
case 0: /* SEEK_SET */
newpos = off;
break;
case 1: /* SEEK_CUR */
newpos = filp->f_pos + off;
break;
case 2: /* SEEK_END */
newpos = pmc_size;
break;
default: /* can't happen */
return -EINVAL;
}
if (newpos < 0)
return -EINVAL;
if (newpos > pmc_size)
newpos = pmc_size;
filp->f_pos = newpos;
return newpos;
}
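For reference, the position arithmetic in wil_pmc_read() can be checked in isolation. A minimal user-space sketch with illustrative sizes (in the driver, do_div(idx, size) leaves the quotient in idx):

#include <stdio.h>

int main(void)
{
    /* model of the wil_pmc_read() math: which descriptor a file
     * position falls into, and the offset inside that descriptor
     */
    unsigned long long pos = 5000;      /* *f_pos */
    unsigned desc_size = 2048;          /* pmc->descriptor_size */

    unsigned long long idx = pos / desc_size;           /* -> 2 */
    unsigned long long offset = pos - idx * desc_size;  /* -> 904 */

    /* the read then copies from descriptors[idx].va + offset and
     * never crosses the end of that single descriptor
     */
    printf("descriptor %llu, offset %llu\n", idx, offset);
    return 0;
}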


@ -0,0 +1,27 @@
/*
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
void wil_pmc_init(struct wil6210_priv *wil);
void wil_pmc_alloc(struct wil6210_priv *wil,
int num_descriptors, int descriptor_size);
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *);
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
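Taken together, these declarations imply a simple debugfs-driven lifecycle. A hedged usage fragment (not driver code; 'wil' and the sizes are illustrative):

/* illustrative call order for the PMC API declared above */
wil_pmc_init(wil);                 /* once, at device init */
wil_pmc_alloc(wil, 256, 2048);     /* 256 descriptors of 2048 bytes */
if (wil_pmc_last_cmd_status(wil) == 0) {
    /* firmware now streams into the ring; userspace reads the
     * data back via debugfs (wil_pmc_read/wil_pmc_llseek)
     */
}
wil_pmc_free(wil, 1 /* send WMI_PMC_RELEASE to firmware */);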


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
return -ENOMEM;
}
d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
wil_desc_addr_set(&d->dma.addr, pa);
/* ip_length don't care */
/* b11 don't care */
@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
if (!wil->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
return 0;
out_free:
txdata->dot1x_open = false;
txdata->enabled = 0;
wil_vring_free(wil, vring, 1);
out:
@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
if (!wil->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
return 0;
out_free:
txdata->enabled = 0;
txdata->dot1x_open = false;
wil_vring_free(wil, vring, 1);
out:
@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* make sure NAPI won't touch this vring */
@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
if (cid < 0)
return NULL;
if (!wil->sta[cid].data_port_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
return NULL;
/* TODO: fix for multiple TID */
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
if (wil->vring2cid_tid[i][0] == cid) {
struct vring *v = &wil->vring_tx[i];
@ -883,7 +891,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
* find 1-st vring and see whether it is eligible for data
* find 1-st vring eligible for this skb and use it.
*/
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open &&
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
break;
continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* in all cases override dest address to unicast peer's address
* Use old strategy when new is not supported yet:
* - for PBSS
* - for secure link
*/
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
struct sk_buff *skb)
@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
v = &wil->vring_tx[i];
if (!v->va)
return NULL;
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
return NULL;
return v;
}
@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open)
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@ -989,7 +1000,8 @@ found:
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
if (!wil->sta[cid].data_port_open)
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
if (wdev->iftype != NL80211_IFTYPE_AP)
return wil_find_tx_bcast_2(wil, skb);
if (wil->privacy)
return wil_find_tx_bcast_2(wil, skb);
return wil_find_tx_bcast_1(wil, skb);
}
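The per-ring test this patch repeats in every Tx lookup path can be read as a single predicate: until the 802.1x handshake marks a ring open, only EAPOL frames may pass. An illustrative helper (not part of the driver) capturing that rule:

static bool wil_ring_may_tx(struct vring_tx_data *txdata, struct sk_buff *skb)
{
    /* dot1x_open is set by the WMI_VRING_EN event handler added in
     * wmi.c below; until then only EAPOL (ETH_P_PAE) frames pass
     */
    return txdata->dot1x_open ||
           skb->protocol == cpu_to_be16(ETH_P_PAE);
}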
@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_tx_desc_map(d, pa, len, vring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
/* set MCS 1 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
/* packet mode 2 */
d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
(2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
}
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {


@ -384,19 +384,27 @@ struct vring_rx_mac {
* [word 7] length
*/
#define RX_DMA_D0_CMD_DMA_IT BIT(10)
#define RX_DMA_D0_CMD_DMA_EOP BIT(8)
#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */
#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */
/* Error field, offload bits */
#define RX_DMA_ERROR_L3_ERR BIT(4)
#define RX_DMA_ERROR_L4_ERR BIT(5)
/* Error field */
#define RX_DMA_ERROR_FCS BIT(0)
#define RX_DMA_ERROR_MIC BIT(1)
#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */
#define RX_DMA_ERROR_REPLAY BIT(3)
#define RX_DMA_ERROR_L3_ERR BIT(4)
#define RX_DMA_ERROR_L4_ERR BIT(5)
/* Status field */
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_ERROR BIT(2)
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_EOP BIT(1)
#define RX_DMA_STATUS_ERROR BIT(2)
#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */
#define RX_DMA_STATUS_L3I BIT(4)
#define RX_DMA_STATUS_L4I BIT(5)
#define RX_DMA_STATUS_PHY_INFO BIT(6)
#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */
struct vring_rx_dma {
u32 d0;


@ -21,6 +21,7 @@
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
#include "wil_platform.h"
extern bool no_fw_recovery;
@ -29,10 +30,11 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
extern bool debug_fw;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw" /* code */
#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@ -396,6 +398,7 @@ struct vring {
* Additional data for Tx Vring
*/
struct vring_tx_data {
bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
@ -484,7 +487,6 @@ struct wil_sta_info {
u8 addr[ETH_ALEN];
enum wil_sta_status status;
struct wil_net_stats stats;
bool data_port_open; /* can send any data, not only EAPOL */
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
@ -526,6 +528,17 @@ struct wil_probe_client_req {
u8 cid;
};
struct pmc_ctx {
/* alloc, free, and read operations must own the lock */
struct mutex lock;
struct vring_tx_desc *pring_va;
dma_addr_t pring_pa;
struct desc_alloc_info *descriptors;
int last_cmd_status;
int num_descriptors;
int descriptor_size;
};
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
@ -610,6 +623,8 @@ struct wil6210_priv {
void *platform_handle;
struct wil_platform_ops platform_ops;
struct pmc_ctx pmc;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@ -701,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
int wmi_set_channel(struct wil6210_priv *wil, int channel);
int wmi_get_channel(struct wil6210_priv *wil, int *channel);
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr);
const void *mac_addr, int key_usage);
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr, int key_len, const void *key);
const void *mac_addr, int key_len, const void *key,
int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
}
static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
{
struct vring_tx_data *t;
int i;
struct wmi_vring_en_event *evt = d;
u8 vri = evt->vring_index;
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
if (cid != wil->vring2cid_tid[i][0])
continue;
t = &wil->vring_tx_data[i];
if (!t->enabled)
continue;
wil_dbg_wmi(wil, "Enable vring %d\n", vri);
wil_addba_tx_request(wil, i, wsize);
}
}
static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
struct wmi_data_port_open_event *evt = d;
u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
if (cid >= ARRAY_SIZE(wil->sta)) {
wil_err(wil, "Link UP for invalid CID %d\n", cid);
if (vri >= ARRAY_SIZE(wil->vring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
wil->sta[cid].data_port_open = true;
wil->vring_tx_data[vri].dot1x_open = true;
if (vri == wil->bcast_vring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
wil_addba_tx_cid(wil, cid, agg_wsize);
}
static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_wbe_link_down_event *evt = d;
u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
cid, le32_to_cpu(evt->reason));
if (cid >= ARRAY_SIZE(wil->sta)) {
wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
return;
}
wil->sta[cid].data_port_open = false;
netif_carrier_off(ndev);
wil_addba_tx_request(wil, vri, agg_wsize);
}
static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
@ -695,11 +662,10 @@ static const struct {
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
{WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
};
/*
@ -844,7 +810,7 @@ int wmi_echo(struct wil6210_priv *wil)
};
return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
}
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@ -985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
}
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr)
const void *mac_addr, int key_usage)
{
struct wmi_delete_cipher_key_cmd cmd = {
.key_index = key_index,
@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
}
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr, int key_len, const void *key)
const void *mac_addr, int key_len, const void *key,
int key_usage)
{
struct wmi_add_cipher_key_cmd cmd = {
.key_index = key_index,
.key_usage = WMI_KEY_USE_PAIRWISE,
.key_usage = key_usage,
.key_len = key_len,
};
@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity.
*
* Permission to use, copy, modify, and/or distribute this software for any
@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd {
*/
enum wmi_key_usage {
WMI_KEY_USE_PAIRWISE = 0,
WMI_KEY_USE_GROUP = 1,
WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
WMI_KEY_USE_RX_GROUP = 1,
WMI_KEY_USE_TX_GROUP = 2,
};
struct wmi_add_cipher_key_cmd {
@ -835,6 +835,21 @@ struct wmi_temp_sense_cmd {
__le32 measure_mode;
} __packed;
/*
* WMI_PMC_CMDID
*/
enum wmi_pmc_op_e {
WMI_PMC_ALLOCATE = 0,
WMI_PMC_RELEASE = 1,
};
struct wmi_pmc_cmd {
u8 op; /* enum wmi_pmc_op_e */
u8 reserved;
__le16 ring_size;
__le64 mem_base;
} __packed;
/*
* WMI Events
*/
@ -870,7 +885,7 @@ enum wmi_event_id {
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
@ -882,7 +897,7 @@ enum wmi_event_id {
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
@ -894,11 +909,12 @@ enum wmi_event_id {
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINKDOWN_EVENTID = 0x1861,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
@ -1147,7 +1163,7 @@ struct wmi_vring_cfg_done_event {
} __packed;
/*
* WMI_ADDBA_RESP_SENT_EVENTID
* WMI_RCP_ADDBA_RESP_SENT_EVENTID
*/
struct wmi_rcp_addba_resp_sent_event {
u8 cidxtid;
@ -1179,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event {
} __packed;
/*
* WMI_WBE_LINKDOWN_EVENTID
* WMI_WBE_LINK_DOWN_EVENTID
*/
enum wmi_wbe_link_down_event_reason {
WMI_WBE_REASON_USER_REQUEST = 0,
@ -1201,6 +1217,14 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
/*
* WMI_VRING_EN_EVENTID
*/
struct wmi_vring_en_event {
u8 vring_index;
u8 reserved[3];
} __packed;
/*
* WMI_GET_PCP_CHANNEL_EVENTID
*/


@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>
#include <defs.h>
@ -1116,12 +1117,25 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
int val)
{
#if IS_ENABLED(CONFIG_ACPI)
struct acpi_device *adev;
adev = ACPI_COMPANION(dev);
if (adev)
adev->flags.power_manageable = 0;
#endif
}
static int brcmf_ops_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
struct device *dev;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@ -1129,6 +1143,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
dev = &func->dev;
/* prohibit ACPI power management for this device */
brcmf_sdiod_acpi_set_power_manageable(dev, 0);
/* Consume func num 1 but don't do anything with it. */
if (func->num == 1)
return 0;


@ -22,17 +22,6 @@
#include "core.h"
#include "commonring.h"
/* dma flushing needs implementation for mips and arm platforms. Should
* be put in util. Note, this is not real flushing. It is virtual non
* cached memory. Only write buffers should have to be drained. Though
* this may be different depending on platform......
* SEE ALSO msgbuf.c
*/
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
int (*cr_ring_bell)(void *ctx),
int (*cr_update_rptr)(void *ctx),
@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
address = commonring->buf_addr;
address += (commonring->f_ptr * commonring->item_len);
if (commonring->f_ptr > commonring->w_ptr) {
brcmf_dma_flush(address,
(commonring->depth - commonring->f_ptr) *
commonring->item_len);
address = commonring->buf_addr;
commonring->f_ptr = 0;
}
brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
commonring->item_len);
commonring->f_ptr = commonring->w_ptr;
@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
if (commonring->r_ptr == commonring->depth)
commonring->r_ptr = 0;
brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
return ret_addr;
}


@ -25,7 +25,7 @@
#define BRCMF_FW_MAX_NVRAM_SIZE 64000
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 9 /* pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
module_param_string(firmware_path, brcmf_firmware_path,
@ -66,6 +66,12 @@ struct nvram_parser {
bool multi_dev_v2;
};
/**
* is_nvram_char() - check if a char is valid for an NVRAM entry
*
* It accepts all printable ASCII chars except for '#', which opens a comment.
* Note that ' ' (space), while accepted, is not a valid key name char.
*/
static bool is_nvram_char(char c)
{
/* comment marker excluded */
@ -73,7 +79,7 @@ static bool is_nvram_char(char c)
return false;
/* key and value may have any other readable character */
return (c > 0x20 && c < 0x7f);
return (c >= 0x20 && c < 0x7f);
}
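As a quick sanity check of the relaxed character range, a small user-space model of the patched function (the '#' comment-marker test is reconstructed from the context above):

#include <stdio.h>
#include <stdbool.h>

static bool is_nvram_char(char c)
{
    if (c == '#')                    /* comment marker excluded */
        return false;
    return (c >= 0x20 && c < 0x7f);  /* space is now accepted */
}

int main(void)
{
    printf("' ' -> %d\n", is_nvram_char(' '));  /* 1: valid in values */
    printf("'#' -> %d\n", is_nvram_char('#'));  /* 0: opens a comment */
    printf("'~' -> %d\n", is_nvram_char('~'));  /* 1 */
    return 0;
}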
static bool is_whitespace(char c)
@ -120,7 +126,7 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
nvp->multi_dev_v1 = true;
if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
nvp->multi_dev_v2 = true;
} else if (!is_nvram_char(c)) {
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
return COMMENT;
@ -162,17 +168,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
static enum nvram_parser_state
brcmf_nvram_handle_comment(struct nvram_parser *nvp)
{
char *eol, *sol;
char *eoc, *sol;
sol = (char *)&nvp->fwnv->data[nvp->pos];
eol = strchr(sol, '\n');
if (eol == NULL)
return END;
eoc = strchr(sol, '\n');
if (!eoc) {
eoc = strchr(sol, '\0');
if (!eoc)
return END;
}
/* eat all moving to next line */
nvp->line++;
nvp->column = 1;
nvp->pos += (eol - sol) + 1;
nvp->pos += (eoc - sol) + 1;
return IDLE;
}
@ -222,6 +231,10 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
u16 bus_nr)
{
/* Device path with a leading '=' key-value separator */
char pcie_path[] = "=pcie/?/?";
size_t pcie_len;
u32 i, j;
bool found;
u8 *nvram;
@ -238,6 +251,9 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
/* First search for the devpathX and see if it is the configuration
* for domain_nr/bus_nr. Search the complete nvp.
*/
snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
bus_nr);
pcie_len = strlen(pcie_path);
found = false;
i = 0;
while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
@ -245,13 +261,10 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
* Y = domain_nr, Z = bus_nr, X = virtual ID
*/
if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
(strncmp(&nvp->nvram[i + 8], "=pcie/", 6) == 0)) {
if (((nvp->nvram[i + 14] - '0') == domain_nr) &&
((nvp->nvram[i + 16] - '0') == bus_nr)) {
id = nvp->nvram[i + 7] - '0';
found = true;
break;
}
(strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) {
id = nvp->nvram[i + 7] - '0';
found = true;
break;
}
while (nvp->nvram[i] != 0)
i++;
@ -297,6 +310,8 @@ fail:
static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
u16 bus_nr)
{
char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
size_t len;
u32 i, j;
u8 *nvram;
@ -308,14 +323,13 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
* Valid entries are of type pcie/X/Y/ where X = domain_nr and
* Y = bus_nr.
*/
snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
len = strlen(prefix);
i = 0;
j = 0;
while (i < nvp->nvram_len - BRCMF_FW_NVRAM_PCIEDEV_LEN) {
if ((strncmp(&nvp->nvram[i], "pcie/", 5) == 0) &&
(nvp->nvram[i + 6] == '/') && (nvp->nvram[i + 8] == '/') &&
((nvp->nvram[i + 5] - '0') == domain_nr) &&
((nvp->nvram[i + 7] - '0') == bus_nr)) {
i += BRCMF_FW_NVRAM_PCIEDEV_LEN;
while (i < nvp->nvram_len - len) {
if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
i += len;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
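The v2 stripping shown above reduces to matching a computed "pcie/<domain>/<bus>/" prefix and copying the rest of each matching NUL-terminated entry. A self-contained user-space model with illustrative buffer contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* two NUL-separated entries; only the pcie/0/1/ one should survive */
    const char nvram[] =
        "pcie/0/1/macaddr=00:11:22:33:44:55\0pcie/0/2/xtalfreq=37400\0";
    char out[64], prefix[16];
    size_t len, i = 0, j = 0, n = sizeof(nvram) - 1;

    snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", 0, 1);
    len = strlen(prefix);

    while (i < n) {
        if (strncmp(&nvram[i], prefix, len) == 0) {
            i += len;
            while (nvram[i] != 0)
                out[j++] = nvram[i++];
            out[j++] = 0;
        } else {
            while (nvram[i] != 0)
                i++;
        }
        i++; /* step over the NUL separator */
    }
    printf("%s\n", out); /* -> macaddr=00:11:22:33:44:55 */
    return 0;
}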


@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
}
void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb)
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
brcmf_flowring_block(flow, flowid, false);
}
return skb_queue_len(&ring->skblist);
}


@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);


@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
return 0;
}
static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
u32 slot_id, struct sk_buff **pktout,
bool remove_item)
{


@ -73,7 +73,7 @@
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
struct msgbuf_common_hdr {
@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids {
struct brcmf_msgbuf_pktid *array;
};
/* dma flushing needs implementation for mips and arm platforms. Should
* be put in util. Note, this is not real flushing. It is virtual non
* cached memory. Only write buffers should have to be drained. Though
* this may be different depending on platform......
*/
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
memcpy(msgbuf->ioctbuf, buf, buf_len);
else
memset(msgbuf->ioctbuf, 0, buf_len);
brcmf_dma_flush(ioctl_buf, buf_len);
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
@ -795,6 +784,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
struct brcmf_flowring *flow = msgbuf->flow;
struct ethhdr *eh = (struct ethhdr *)(skb->data);
u32 flowid;
u32 queue_count;
bool force;
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@ -802,8 +793,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
if (flowid == BRCMF_FLOWRING_INVALID_ID)
return -ENOMEM;
}
brcmf_flowring_enqueue(flow, flowid, skb);
brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
return 0;
}


@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
if (!sdiodev->pdata)
return;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
sdiodev->pdata->drive_strength = val;
/* make sure there are interrupts defined in the node */
if (!of_find_property(np, "interrupts", NULL))
return;
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
devm_kfree(dev, sdiodev->pdata);
return;
}
irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->pdata->oob_irq_supported = true;
sdiodev->pdata->oob_irq_nr = irq;
sdiodev->pdata->oob_irq_flags = irqf;
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
sdiodev->pdata->drive_strength = val;
}


@ -112,10 +112,11 @@ enum brcmf_pcie_state {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
#define BRCMF_PCIE_MIN_SHARED_VERSION 4
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
@ -147,6 +148,10 @@ enum brcmf_pcie_state {
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
@ -248,6 +253,13 @@ struct brcmf_pciedev_info {
bool mbdata_completed;
bool irq_allocated;
bool wowl_enabled;
u8 dma_idx_sz;
void *idxbuf;
u32 idxbuf_sz;
dma_addr_t idxbuf_dmahandle;
u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value);
};
struct brcmf_pcie_ringbuf {
@ -277,15 +289,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
};
/* dma flushing needs implementation for mips and arm platforms. Should
* be put in util. Note, this is not real flushing. It is virtual non
* cached memory. Only write buffers should have to be drained. Though
* this may be different depending on platform......
*/
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@ -333,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
static u16
brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
u16 *address = devinfo->idxbuf + mem_offset;
return (*(address));
}
static void
brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value)
{
u16 *address = devinfo->idxbuf + mem_offset;
*(address) = value;
}
static u32
brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
@ -878,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
return 0;
}
@ -896,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
return 0;
}
@ -925,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
@ -943,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
@ -1048,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
}
kfree(devinfo->shared.flowrings);
devinfo->shared.flowrings = NULL;
if (devinfo->idxbuf) {
dma_free_coherent(&devinfo->pdev->dev,
devinfo->idxbuf_sz,
devinfo->idxbuf,
devinfo->idxbuf_dmahandle);
devinfo->idxbuf = NULL;
}
}
@ -1063,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
u32 addr;
u32 ring_mem_ptr;
u32 i;
u64 address;
u32 bufsz;
u16 max_sub_queues;
u8 idx_offset;
ring_addr = devinfo->shared.ring_info_addr;
brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
if (devinfo->dma_idx_sz != 0) {
bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
devinfo->dma_idx_sz * 2;
devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
&devinfo->idxbuf_dmahandle,
GFP_KERNEL);
if (!devinfo->idxbuf)
devinfo->dma_idx_sz = 0;
}
if (devinfo->dma_idx_sz == 0) {
addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
idx_offset = sizeof(u32);
devinfo->write_ptr = brcmf_pcie_write_tcm16;
devinfo->read_ptr = brcmf_pcie_read_tcm16;
brcmf_dbg(PCIE, "Using TCM indices\n");
} else {
memset(devinfo->idxbuf, 0, bufsz);
devinfo->idxbuf_sz = bufsz;
idx_offset = devinfo->dma_idx_sz;
devinfo->write_ptr = brcmf_pcie_write_idx;
devinfo->read_ptr = brcmf_pcie_read_idx;
h2d_w_idx_ptr = 0;
addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
address = (u64)devinfo->idxbuf_dmahandle;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
address += max_sub_queues * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
address += max_sub_queues * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
d2h_r_idx_ptr = d2h_w_idx_ptr +
BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
brcmf_dbg(PCIE, "Using host memory indices\n");
}
addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
@ -1089,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
h2d_w_idx_ptr += sizeof(u32);
h2d_r_idx_ptr += sizeof(u32);
h2d_w_idx_ptr += idx_offset;
h2d_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
@ -1104,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
d2h_w_idx_ptr += sizeof(u32);
d2h_r_idx_ptr += sizeof(u32);
d2h_w_idx_ptr += idx_offset;
d2h_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
devinfo->shared.nrof_flowrings =
max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
@ -1134,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring);
ring->w_idx_addr = h2d_w_idx_ptr;
ring->r_idx_addr = h2d_r_idx_ptr;
h2d_w_idx_ptr += sizeof(u32);
h2d_r_idx_ptr += sizeof(u32);
h2d_w_idx_ptr += idx_offset;
h2d_r_idx_ptr += idx_offset;
}
devinfo->shared.flowrings = rings;
return 0;
fail:
brcmf_err("Allocating commonring buffers failed\n");
brcmf_err("Allocating ring buffers failed\n");
brcmf_pcie_release_ringbuffers(devinfo);
return -ENOMEM;
}
@ -1175,7 +1255,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
@ -1193,7 +1272,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
@ -1280,10 +1358,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
brcmf_err("Unsupported PCIE version %d\n", version);
return -EINVAL;
}
if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
brcmf_err("Unsupported legacy TX mode 0x%x\n",
shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
return -EINVAL;
/* check whether the firmware supports DMA indices */
if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
devinfo->dma_idx_sz = sizeof(u16);
else
devinfo->dma_idx_sz = sizeof(u32);
}
addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
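When the firmware advertises DMA indices (the BRCMF_PCIE_SHARED_DMA_INDEX path above), one coherent buffer holds four consecutive index regions: H2D write, H2D read, D2H write, D2H read. A user-space check of that layout arithmetic (illustrative numbers; BRCMF_NROF_D2H_COMMON_MSGRINGS is 3 in the driver):

#include <stdio.h>

int main(void)
{
    unsigned idx_sz = 2;          /* 2-byte indices (DMA_2B_IDX) */
    unsigned max_sub_queues = 5;  /* read from shared RAM */
    unsigned nrof_d2h = 3;        /* BRCMF_NROF_D2H_COMMON_MSGRINGS */

    unsigned bufsz = (nrof_d2h + max_sub_queues) * idx_sz * 2;
    unsigned h2d_w = 0;
    unsigned h2d_r = h2d_w + max_sub_queues * idx_sz;
    unsigned d2h_w = h2d_r + max_sub_queues * idx_sz;
    unsigned d2h_r = d2h_w + nrof_d2h * idx_sz;

    /* the last region must end exactly at bufsz */
    printf("bufsz=%u, regions at %u/%u/%u/%u, end=%u\n",
           bufsz, h2d_w, h2d_r, d2h_w, d2h_r,
           d2h_r + nrof_d2h * idx_sz);
    return 0;
}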


@ -9,6 +9,7 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += $(iwlwifi-m)


@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_SUPPORT_FAST_XMIT |
IEEE80211_HW_WANT_MONITOR_VIF;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;


@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 13
#define IWL7260_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12


@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 13
#define IWL8000_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
#define IWL_DEVICE_8000 \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
.ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.dccm2_offset = IWL8260_DCCM2_OFFSET, \
.dccm2_len = IWL8260_DCCM2_LEN, \
.smem_offset = IWL8260_SMEM_OFFSET, \
.smem_len = IWL8260_SMEM_LEN
static const struct iwl_tt_params iwl8000_tt_params = {
.ct_kill_entry = 115,
.ct_kill_exit = 93,
.ct_kill_duration = 5,
.dynamic_smps_entry = 111,
.dynamic_smps_exit = 107,
.tx_protection_entry = 112,
.tx_protection_exit = 105,
.tx_backoff = {
{.temperature = 110, .backoff = 200},
{.temperature = 111, .backoff = 600},
{.temperature = 112, .backoff = 1200},
{.temperature = 113, .backoff = 2000},
{.temperature = 114, .backoff = 4000},
},
.support_ct_kill = true,
.support_dynamic_smps = true,
.support_tx_protection = true,
.support_tx_backoff = true,
};
#define IWL_DEVICE_8000 \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
.ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.dccm2_offset = IWL8260_DCCM2_OFFSET, \
.dccm2_len = IWL8260_DCCM2_LEN, \
.smem_offset = IWL8260_SMEM_OFFSET, \
.smem_len = IWL8260_SMEM_LEN, \
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
.apmg_not_supported = true
const struct iwl_cfg iwl8260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 8260",
@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,


@ -360,6 +360,7 @@ struct iwl_cfg {
const u32 smem_offset;
const u32 smem_len;
const struct iwl_tt_params *thermal_params;
bool apmg_not_supported;
};
/*


@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
TRACE_EVENT(iwlwifi_dev_rx,
TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
void *rxbuf, size_t len),
TP_ARGS(dev, trans, rxbuf, len),
struct iwl_rx_packet *pkt, size_t len),
TP_ARGS(dev, trans, pkt, len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
__field(u8, cmd)
__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
),
TP_fast_assign(
DEV_ASSIGN;
memcpy(__get_dynamic_array(rxbuf), rxbuf,
iwl_rx_trace_len(trans, rxbuf, len));
__entry->cmd = pkt->hdr.cmd;
memcpy(__get_dynamic_array(rxbuf), pkt,
iwl_rx_trace_len(trans, pkt, len));
),
TP_printk("[%s] RX cmd %#.2x",
__get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
__get_str(dev), __entry->cmd)
);
TRACE_EVENT(iwlwifi_dev_tx,


@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_api *ucode_api = (void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i;
if (api_index >= IWL_API_ARRAY_SIZE) {
if (api_index >= IWL_API_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
return -EINVAL;
/* don't return an error so we can load FW that has more bits */
return 0;
}
capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
for (i = 0; i < 32; i++) {
if (api_flags & BIT(i))
__set_bit(i + 32 * api_index, capa->_api);
}
return 0;
}
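The loop above folds each 32-bit TLV word into one flat bitmap, so bit i of word api_index lands at flat position i + 32 * api_index. A user-space model of that mapping:

#include <stdio.h>

int main(void)
{
    unsigned long long bitmap = 0;   /* stands in for capa->_api */
    unsigned api_index = 1;          /* second 32-bit word */
    unsigned api_flags = 1u << 3;    /* bit 3 set in that word */
    unsigned i;

    for (i = 0; i < 32; i++)
        if (api_flags & (1u << i))
            bitmap |= 1ULL << (i + 32 * api_index);

    /* bit 3 of word 1 -> flat bit 35 */
    printf("bit 35 set: %llu\n", (bitmap >> 35) & 1);
    return 0;
}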
@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_capa *ucode_capa = (void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i;
if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
return -EINVAL;
/* don't return an error so we can load FW that has more bits */
return 0;
}
capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
for (i = 0; i < 32; i++) {
if (api_flags & BIT(i))
__set_bit(i + 32 * api_index, capa->_capa);
}
return 0;
}
@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto try_again;
if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
api_ver = drv->fw.ucode_ver;
else
api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
sizeof(struct iwl_fw_dbg_trigger_txq_timer);
trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
sizeof(struct iwl_fw_dbg_trigger_time_event);
trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
sizeof(struct iwl_fw_dbg_trigger_ba);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {


@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
/*
* RX related structures and functions
*/
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers


@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* detection.
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_RSSI,
FW_DBG_TRIGGER_TXQ_TIMERS,
FW_DBG_TRIGGER_TIME_EVENT,
FW_DBG_TRIGGER_BA,
/* must be last */
FW_DBG_TRIGGER_MAX,


@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@ -255,22 +257,27 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
* @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
* instead of 3.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11),
IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
IWL_UCODE_TLV_API_STATS_V10 = BIT(19),
IWL_UCODE_TLV_API_NEW_VERSION = BIT(20),
IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@ -290,6 +297,7 @@ enum iwl_ucode_tlv_api {
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@ -299,22 +307,23 @@ enum iwl_ucode_tlv_api {
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3),
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29),
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
};
/* The default calibrate table size if not specified by firmware file */
@ -325,13 +334,14 @@ enum iwl_ucode_tlv_capa {
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
#define IWL_API_MAX_BITS 64
#define IWL_CAPABILITIES_MAX_BITS 64
/*
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
/* uCode version contains 4 values: Major/Minor/API/Serial */
@ -424,11 +434,13 @@ struct iwl_fw_dbg_reg_op {
* @SMEM_MODE: monitor stores the data in SMEM
* @EXTERNAL_MODE: monitor stores the data in allocated DRAM
* @MARBH_MODE: monitor stores the data in MARBH buffer
* @MIPI_MODE: monitor outputs the data through the MIPI interface
*/
enum iwl_fw_dbg_monitor_mode {
SMEM_MODE = 0,
EXTERNAL_MODE = 1,
MARBH_MODE = 2,
MIPI_MODE = 3,
};
/**
@ -660,6 +672,33 @@ struct iwl_fw_dbg_trigger_time_event {
} __packed time_events[16];
} __packed;
/**
* struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
* rx_ba_start: tid bitmap to configure on what tid the trigger should occur
* when an Rx BlockAck session is started.
* rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
* when an Rx BlockAck session is stopped.
* tx_ba_start: tid bitmap to configure on what tid the trigger should occur
* when a Tx BlockAck session is started.
* tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
* when a Tx BlockAck session is stopped.
* rx_bar: tid bitmap to configure on what tid the trigger should occur
* when a BAR is received (for a Tx BlockAck session).
* tx_bar: tid bitmap to configure on what tid the trigger should occur
* when a BAR is sent (for an Rx BlockAck session).
* frame_timeout: tid bitmap to configure on what tid the trigger should occur
* when a frame times out in the reordering buffer.
*/
struct iwl_fw_dbg_trigger_ba {
__le16 rx_ba_start;
__le16 rx_ba_stop;
__le16 tx_ba_start;
__le16 tx_ba_stop;
__le16 rx_bar;
__le16 tx_bar;
__le16 frame_timeout;
} __packed;
/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
* @id: conf id


@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -105,10 +105,24 @@ struct iwl_ucode_capabilities {
u32 n_scan_channels;
u32 standard_phy_calibration_size;
u32 flags;
u32 api[IWL_API_ARRAY_SIZE];
u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
};
static inline bool
fw_has_api(const struct iwl_ucode_capabilities *capabilities,
iwl_ucode_tlv_api_t api)
{
return test_bit((__force long)api, capabilities->_api);
}
static inline bool
fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
iwl_ucode_tlv_capa_t capa)
{
return test_bit((__force long)capa, capabilities->_capa);
}
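/* Illustrative sketch, not part of this patch: typical use of the new
 * accessors.  The wrapper function is hypothetical; the capability
 * constant is one of those defined earlier in this file.
 */
static inline bool example_supports_hotspot(const struct iwl_fw *fw)
{
	return fw_has_capa(&fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT);
}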
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
const void *data; /* vmalloc'ed data */
@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode)
return "EXTERNAL_DRAM";
case MARBH_MODE:
return "MARBH";
case MIPI_MODE:
return "MIPI";
default:
return "UNKNOWN";
}

View file

@ -0,0 +1,113 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/kernel.h>
#include "iwl-trans.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops,
size_t dev_cmd_headroom)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
#endif
trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
if (!trans)
return NULL;
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
#endif
trans->dev = dev;
trans->cfg = cfg;
trans->ops = ops;
trans->dev_cmd_headroom = dev_cmd_headroom;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
sizeof(struct iwl_device_cmd)
+ trans->dev_cmd_headroom,
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);
if (!trans->dev_cmd_pool)
goto free;
return trans;
free:
kfree(trans);
return NULL;
}
void iwl_trans_free(struct iwl_trans *trans)
{
kmem_cache_destroy(trans->dev_cmd_pool);
kfree(trans);
}
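/* Illustrative sketch, not part of this patch: how a transport backend
 * would use the new helper on its probe path.  The function is
 * hypothetical; priv_size sizes the backend-private area that ends up
 * behind trans->trans_specific.
 */
static struct iwl_trans *example_backend_alloc(struct device *dev,
					       const struct iwl_cfg *cfg,
					       const struct iwl_trans_ops *ops,
					       unsigned int priv_size)
{
	struct iwl_trans *trans;

	/* no per-command headroom needed by this hypothetical backend */
	trans = iwl_trans_alloc(priv_size, dev, cfg, ops, 0);
	if (!trans)
		return NULL;
	/* the matching teardown path calls iwl_trans_free(trans) */
	return trans;
}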

View file

@ -641,6 +641,8 @@ struct iwl_trans {
enum iwl_d0i3_mode d0i3_mode;
bool wowlan_d0i3;
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to sizeof(void *) */
char trans_specific[0] __aligned(sizeof(void *));
@ -1010,20 +1012,20 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode);
}
/*****************************************************
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops,
size_t dev_cmd_headroom);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
static inline void trans_lockdep_init(struct iwl_trans *trans)
{
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
#endif
}
#endif /* __iwl_trans_h__ */

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd *bt_cmd;
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
int ret;
struct iwl_bt_coex_cmd bt_cmd = {};
u32 mode;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_send_bt_init_conf_old(mvm);
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
mode = 0;
}
bt_cmd->mode = cpu_to_le32(mode);
bt_cmd.mode = cpu_to_le32(mode);
goto send_cmd;
}
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
bt_cmd->mode = cpu_to_le32(mode);
bt_cmd.mode = cpu_to_le32(mode);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->enabled_modules |=
bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
if (iwl_mvm_bt_is_plcr_supported(mvm))
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
bt_cmd->enabled_modules |=
bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
}
bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
ret = iwl_mvm_send_cmd(mvm, &cmd);
kfree(bt_cmd);
return ret;
return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
return 0;
}
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_chanctx_conf *chanctx_conf;
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz - don't count it */
if (!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
if (vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
lockdep_is_held(&mvm->mutex));
/* This can happen if the station has been removed right now */
if (IS_ERR_OR_NULL(sta))
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_bt_iterator_data data = {
.mvm = mvm,
};
int ret;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
return;
}
@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_rssi_iterator, &data);
}
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
if (ant & mvm->cfg->non_shared_ant)
return true;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
if (mvm->cfg->bt_shared_single_ant)
return true;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
if (band != IEEE80211_BAND_2GHZ)
@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_coex_vif_change_old(mvm);
return;
}
@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
if (!iwl_mvm_bt_is_plcr_supported(mvm))

View file

@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
iwl_mvm_cancel_scan(mvm);
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
iwl_trans_stop_device(mvm->trans);
@ -1170,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_suspend(mvm->trans);
if (wowlan->any) {
mvm->trans->wowlan_d0i3 = wowlan->any;
if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_enter_d0i3_sync(mvm);
@ -1785,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
int n_channels = 0;
int idx, n_channels = 0;
fw_match = &query.matches[i];
@ -1800,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
net_detect->matches[net_detect->n_matches++] = match;
match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
/* We inverted the order of the SSIDs in the scan
* request, so invert the index here.
*/
idx = mvm->n_nd_match_sets - i - 1;
match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
match->ssid.ssid_len);
if (mvm->n_nd_channels < n_channels)

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
return ret ?: count;
}
static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
char buf[64];
int bufsz = sizeof(buf);
int pos;
pos = scnprintf(buf, bufsz, "bss limit = %d\n",
vif->bss_conf.txpower);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
} while (0)
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_profile_notif_old *notif =
&mvm->last_bt_notif_old;
@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
pos += scnprintf(buf+pos, bufsz-pos,
@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (mvm->scan_rx_ant != scan_rx_ant) {
mvm->scan_rx_ant = scan_rx_ant;
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
}
@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
PRINT_MVM_REF(IWL_MVM_REF_SCAN);
PRINT_MVM_REF(IWL_MVM_REF_ROC);
PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);

View file

@ -294,6 +294,7 @@ enum iwl_scan_ebs_status {
IWL_SCAN_EBS_SUCCESS,
IWL_SCAN_EBS_FAILED,
IWL_SCAN_EBS_CHAN_NOT_FOUND,
IWL_SCAN_EBS_INACTIVE,
};
/**
@ -431,6 +432,17 @@ enum iwl_scan_priority {
IWL_SCAN_PRIORITY_HIGH,
};
enum iwl_scan_priority_ext {
IWL_SCAN_PRIORITY_EXT_0_LOWEST,
IWL_SCAN_PRIORITY_EXT_1,
IWL_SCAN_PRIORITY_EXT_2,
IWL_SCAN_PRIORITY_EXT_3,
IWL_SCAN_PRIORITY_EXT_4,
IWL_SCAN_PRIORITY_EXT_5,
IWL_SCAN_PRIORITY_EXT_6,
IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
};
/**
* iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
* @reserved1: for alignment and future use
@ -837,4 +849,27 @@ struct iwl_scan_offload_profiles_query {
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @scanned_channels: number of channels scanned and number of valid elements in
* results array
* @status: one of SCAN_COMP_STATUS_*
* @bt_status: BT on/off status
* @last_channel: last channel that was scanned
* @tsf_low: TSF timer (lower half) in usecs
* @tsf_high: TSF timer (higher half) in usecs
* @results: array of scan results, only "scanned_channels" of them are valid
*/
struct iwl_umac_scan_iter_complete_notif {
__le32 uid;
u8 scanned_channels;
u8 status;
u8 bt_status;
u8 last_channel;
__le32 tsf_low;
__le32 tsf_high;
struct iwl_scan_results_notif results[];
} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
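/* Illustrative sketch, not part of this patch: walking the trailing
 * results[] array of the notification above.  The function is made up;
 * only the first scanned_channels entries of results[] are valid.
 */
static inline void
example_dump_umac_iter(const struct iwl_umac_scan_iter_complete_notif *n)
{
	int i;

	for (i = 0; i < n->scanned_channels; i++)
		pr_debug("scan iteration: channel %u\n",
			 n->results[i].channel);
}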
#endif

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -108,6 +108,7 @@ enum {
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* UMAC scan commands */
SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
SCAN_CFG_CMD = 0xc,
SCAN_REQ_UMAC = 0xd,
SCAN_ABORT_UMAC = 0xe,
@ -170,12 +171,8 @@ enum {
/* Thermal Throttling*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/* Scanning */
SCAN_REQUEST_CMD = 0x80,
SCAN_ABORT_CMD = 0x81,
SCAN_START_NOTIFICATION = 0x82,
SCAN_RESULTS_NOTIFICATION = 0x83,
SCAN_COMPLETE_NOTIFICATION = 0x84,
/* Set/Get DC2DC frequency tune */
DC2DC_CONFIG_CMD = 0x83,
/* NVM */
NVM_ACCESS_CMD = 0x88,
@ -1395,6 +1392,49 @@ struct iwl_mvm_marker {
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
/*
* enum iwl_dc2dc_config_id - flag ids
*
* Ids of dc2dc configuration flags
*/
enum iwl_dc2dc_config_id {
DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
/**
* struct iwl_dc2dc_config_cmd - configure dc2dc values
*
* (DC2DC_CONFIG_CMD = 0x83)
*
* Set/Get & configure dc2dc values.
* The command always returns the current dc2dc values.
*
* @flags: set/get dc2dc
* @enable_low_power_mode: not used.
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_cmd {
__le32 flags;
__le32 enable_low_power_mode; /* not used */
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
/**
* struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
*
* Current dc2dc values returned by the FW.
*
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_resp {
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
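/* Illustrative sketch, not part of this patch: issuing DC2DC_CONFIG_CMD
 * with the structures above.  The wrapper is hypothetical;
 * iwl_mvm_send_cmd_pdu() is the driver's existing helper.
 */
static int example_set_dc2dc_tune(struct iwl_mvm *mvm, u32 tune0, u32 tune1)
{
	struct iwl_dc2dc_config_cmd cmd = {
		.flags = cpu_to_le32(DCDC_FREQ_TUNE_SET),
		.dc2dc_freq_tune0 = cpu_to_le32(tune0),
		.dc2dc_freq_tune1 = cpu_to_le32(tune1),
	};

	/* the fw echoes the current values back in iwl_dc2dc_config_resp */
	return iwl_mvm_send_cmd_pdu(mvm, DC2DC_CONFIG_CMD, 0,
				    sizeof(cmd), &cmd);
}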
/***********************************
* Smart Fifo API
***********************************/

View file

@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
if (!mvm->trans->ltr_enabled)
return 0;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
return iwl_mvm_config_ltr_v1(mvm);
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* device that are triggered by the INIT firmware (MFUART).
*/
_iwl_trans_stop_device(mvm->trans, false);
_iwl_trans_start_hw(mvm->trans, false);
ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
return ret;
goto error;
}
if (iwlmvm_mod_params.init_dbg)
@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
ret = iwl_mvm_config_scan(mvm);
if (ret)
goto error;

View file

@ -318,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
PTR_RET(resp));
PTR_ERR_OR_ZERO(resp));
goto out;
}
@ -334,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
PTR_RET(regd));
PTR_ERR_OR_ZERO(regd));
goto out;
}
@ -415,6 +415,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
int num_mac, ret, i;
static const u32 mvm_ciphers[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
};
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
@ -428,6 +434,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_CHANCTX_STA_CSA |
IEEE80211_HW_SUPPORT_FAST_XMIT |
IEEE80211_HW_SUPPORTS_CLONED_SKBS;
hw->queues = mvm->first_agg_queue;
@ -440,19 +447,38 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
hw->wiphy->cipher_suites = mvm->ciphers;
/*
* Enable 11w if advertised by firmware and software crypto
* is not enabled (as the firmware will interpret some mgmt
* packets, so enabling it with software crypto isn't safe)
*/
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
!iwlwifi_mod_params.sw_crypto)
!iwlwifi_mod_params.sw_crypto) {
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++;
}
/* currently FW API supports only one optional cipher scheme */
if (mvm->fw->cs[0].cipher) {
mvm->hw->n_cipher_schemes = 1;
mvm->hw->cipher_schemes = &mvm->fw->cs[0];
mvm->ciphers[hw->wiphy->n_cipher_suites] =
mvm->fw->cs[0].cipher;
hw->wiphy->n_cipher_suites++;
}
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@ -509,10 +535,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
else
mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
@ -524,10 +551,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
if ((mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
(mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_LQ_SS_PARAMS))
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_LQ_SS_PARAMS))
hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
}
@ -553,30 +580,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_QUIET;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
hw->wiphy->features |=
NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
/* currently FW API supports only one optional cipher scheme */
if (mvm->fw->cs[0].cipher) {
mvm->hw->n_cipher_schemes = 1;
mvm->hw->cipher_schemes = &mvm->fw->cs[0];
}
#ifdef CONFIG_PM_SLEEP
if (iwl_mvm_is_d0i3_supported(mvm) &&
device_can_wakeup(mvm->trans->dev)) {
@ -616,13 +637,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
}
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
@ -735,6 +757,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
return true;
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
do { \
if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
break; \
iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
} while (0)
static void
iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
enum ieee80211_ampdu_mlme_action action)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
return;
switch (action) {
case IEEE80211_AMPDU_TX_OPERATIONAL: {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
"TX AGG START: MAC %pM tid %d ssn %d\n",
sta->addr, tid, tid_data->ssn);
break;
}
case IEEE80211_AMPDU_TX_STOP_CONT:
CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
"TX AGG STOP: MAC %pM tid %d\n",
sta->addr, tid);
break;
case IEEE80211_AMPDU_RX_START:
CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
"RX AGG START: MAC %pM tid %d ssn %d\n",
sta->addr, tid, rx_ba_ssn);
break;
case IEEE80211_AMPDU_RX_STOP:
CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
"RX AGG STOP: MAC %pM tid %d\n",
sta->addr, tid);
break;
default:
break;
}
}
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@ -811,6 +887,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
if (!ret) {
u16 rx_ba_ssn = 0;
if (action == IEEE80211_AMPDU_RX_START)
rx_ba_ssn = *ssn;
iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
rx_ba_ssn, action);
}
mutex_unlock(&mvm->mutex);
/*
@ -1410,7 +1496,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
* The work item could be running or queued if the
* ROC time event stops just as we get here.
*/
cancel_work_sync(&mvm->roc_done_wk);
flush_work(&mvm->roc_done_wk);
iwl_trans_stop_device(mvm->trans);
@ -1423,20 +1509,24 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
* But make sure to clean up interfaces that went down before or while
* the HW restart was requested.
*/
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
ieee80211_iterate_interfaces(mvm->hw, 0,
iwl_mvm_cleanup_iterator, mvm);
/* We shouldn't have any UIDs still set. Loop over all the UIDs to
* make sure there's nothing left there and warn if any is found.
*/
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
int i;
for (i = 0; i < mvm->max_scans; i++) {
if (WARN_ONCE(mvm->scan_uid[i],
"UMAC scan UID %d was not cleaned\n",
mvm->scan_uid[i]))
mvm->scan_uid[i] = 0;
if (WARN_ONCE(mvm->scan_uid_status[i],
"UMAC scan UID %d status was not cleaned\n",
i))
mvm->scan_uid_status[i] = 0;
}
}
@ -1501,7 +1591,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.pwr_restriction = cpu_to_le16(8 * tx_power),
};
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@ -2360,7 +2450,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
iwl_mvm_scan_offload_stop(mvm, true);
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@ -2411,12 +2501,8 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
* cancel scan before ieee80211_scan_work() could run.
* To handle that, simply return if the scan is not running.
*/
/* FIXME: for now, we ignore this race for UMAC scans, since
* they don't set the scan_status.
*/
if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) ||
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_cancel_scan(mvm);
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
mutex_unlock(&mvm->mutex);
}
@ -2765,16 +2851,12 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
* could run. To handle this, simply return if the scan is
* not running.
*/
/* FIXME: for now, we ignore this race for UMAC scans, since
* they don't set the scan_status.
*/
if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
mutex_unlock(&mvm->mutex);
return 0;
}
ret = iwl_mvm_scan_offload_stop(mvm, false);
ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
@ -3039,8 +3121,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
@ -3832,7 +3914,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
if (!(mvm->fw->ucode_capa.capa[0] &
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
@ -3879,8 +3961,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!(mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
@ -3910,9 +3992,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
do { \
@ -3921,7 +4003,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
} while (0)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
@ -3965,6 +4046,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
#undef CHECK_MLME_TRIGGER
}
static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
return;
if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"BAR received from %pM, tid %d, ssn %d",
event->u.ba.sta->addr, event->u.ba.tid,
event->u.ba.ssn);
}
static void
iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
return;
if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"Frame from %pM timed out, tid %d",
event->u.ba.sta->addr, event->u.ba.tid);
}
static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
switch (event->type) {
case MLME_EVENT:
iwl_mvm_event_mlme_callback(mvm, vif, event);
break;
case BAR_RX_EVENT:
iwl_mvm_event_bar_rx_callback(mvm, vif, event);
break;
case BA_FRAME_TIMEOUT:
iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
break;
default:
break;
}
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,

View file

@ -276,6 +276,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_UCODE_DOWN,
IWL_MVM_REF_SCAN,
IWL_MVM_REF_ROC,
IWL_MVM_REF_ROC_AUX,
IWL_MVM_REF_P2P_CLIENT,
IWL_MVM_REF_AP_IBSS,
IWL_MVM_REF_USER,
@ -446,6 +447,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
extern const u8 tid_to_mac80211_ac[];
#define IWL_MVM_SCAN_STOPPING_SHIFT 8
enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
@ -462,8 +465,8 @@ enum iwl_scan_status {
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
IWL_MVM_SCAN_STOPPING_MASK = 0xff00,
IWL_MVM_SCAN_MASK = 0x00ff,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
};
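/* Illustrative sketch, not part of this patch: with this layout the
 * "stopping" bit of a scan type is just its own bit shifted up by
 * IWL_MVM_SCAN_STOPPING_SHIFT, so the two byte-wide masks never overlap
 * (the BUILD_BUG_ON added in mac80211 setup enforces exactly that).
 */
static inline u32 example_stopping_bit(enum iwl_scan_status type)
{
	return (u32)type << IWL_MVM_SCAN_STOPPING_SHIFT;
}
/* example_stopping_bit(IWL_MVM_SCAN_REGULAR) == IWL_MVM_SCAN_STOPPING_REGULAR */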
/**
@ -627,8 +630,7 @@ struct iwl_mvm {
unsigned int max_scans;
/* UMAC scan tracking */
u32 scan_uid[IWL_MVM_MAX_UMAC_SCANS];
u8 scan_seq_num, sched_scan_seq_num;
u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@ -818,6 +820,8 @@ struct iwl_mvm {
} tdls_cs;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
u32 ciphers[6];
};
/* Extract MVM priv from op_mode and _hw */
@ -887,14 +891,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
return mvm->trans->cfg->d0i3 &&
mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
!iwlwifi_mod_params.d0i3_disable &&
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{
bool nvm_lar = mvm->nvm_data->lar_enabled;
bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
if (iwlwifi_mod_params.lar_disable)
return false;
@ -911,24 +916,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
{
return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
return fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
}
static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
}
static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
{
return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
IWL_MVM_BT_COEX_CORUNNING;
}
static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
{
return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
IWL_MVM_BT_COEX_RRC;
}
@ -1121,34 +1130,34 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies);
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies,
int type);
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
lar_enabled = !iwlwifi_mod_params.lar_disable &&
(mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
regulatory, mac_override, phy_sku,
@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
kfree(nvm_buffer);
}
/* load external NVM if configured */
/* Only if PNVM is selected in the mod param - load the external NVM */
if (mvm->nvm_file_name) {
/* read External NVM file - take the default */
/* read External NVM file from the mod param */
ret = iwl_mvm_read_external_nvm(mvm);
if (ret) {
/* choose the nvm_file name according to the
@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
char mcc[3];
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
tlv_lar = mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
if (tlv_lar != nvm_lar)
IWL_INFO(mvm,

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not be able to obtain it back.
*/
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
if (!mvm->trans->cfg->apmg_not_supported)
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@ -238,13 +238,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
iwl_mvm_rx_scan_offload_iter_complete_notif, false),
iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
iwl_mvm_rx_lmac_scan_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
false),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
true),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
iwl_mvm_rx_umac_scan_iter_complete_notif, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@ -279,11 +281,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BINDING_CONTEXT_CMD),
CMD(TIME_QUOTA_CMD),
CMD(NON_QOS_TX_COUNTER_CMD),
CMD(SCAN_REQUEST_CMD),
CMD(SCAN_ABORT_CMD),
CMD(SCAN_START_NOTIFICATION),
CMD(SCAN_RESULTS_NOTIFICATION),
CMD(SCAN_COMPLETE_NOTIFICATION),
CMD(DC2DC_CONFIG_CMD),
CMD(NVM_ACCESS_CMD),
CMD(PHY_CONFIGURATION_CMD),
CMD(CALIB_RES_NOTIF_PHY_DB),
@ -356,6 +354,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
CMD(TDLS_CONFIG_CMD),
CMD(MCC_UPDATE_CMD),
CMD(SCAN_ITERATION_COMPLETE_UMAC),
};
#undef CMD
@ -517,15 +516,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
/* set the nvm_file_name according to priority */
if (iwlwifi_mod_params.nvm_file) {
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
} else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
else
mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
}
else
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
"not allowing power-up and not having nvm_file\n"))

View file

@ -138,7 +138,7 @@ struct rs_tx_column;
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
struct rs_rate *rate,
const struct rs_tx_column *next_col);
struct rs_tx_column {
@ -150,14 +150,14 @@ struct rs_tx_column {
};
static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
}
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
struct iwl_mvm_sta *mvmsta;
@ -187,7 +187,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
if (!sta->ht_cap.ht_supported)
@ -197,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
struct rs_rate *rate = &tbl->rate;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
@ -1128,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_LQ_SS_PARAMS;
bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_LQ_SS_PARAMS);
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@ -1659,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
allow_func = next_col->checks[j];
if (allow_func && !allow_func(mvm, sta, tbl, next_col))
if (allow_func && !allow_func(mvm, sta, &tbl->rate,
next_col))
break;
}
@ -2714,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc_capable = true;
if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
lq_sta->bfer_capable = true;
@ -2998,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
/* TODO: remove old API when min FW API hits 14 */
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
rs_stbc_allow(mvm, sta, lq_sta))
rate.stbc = true;
@ -3212,7 +3212,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
mvmsta = iwl_mvm_sta_from_mac80211(sta);

View file

@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
};
u32 temperature;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
if (iwl_rx_packet_payload_len(pkt) != v10_len)
@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
/* Only handle rx statistics temperature changes if async temp
* notifications are not supported
*/
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
iwl_mvm_tt_temp_changed(mvm, temperature);
ieee80211_iterate_active_interfaces(mvm->hw,

View file

@ -101,14 +101,6 @@ struct iwl_mvm_scan_params {
} schedule[2];
};
enum iwl_umac_scan_uid_type {
IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
};
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type, bool notify);
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@ -168,7 +160,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band, int n_ssids)
{
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
@ -178,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band)
{
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
@ -213,8 +205,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
params->max_out_time = 120;
if (iwl_mvm_low_latency(mvm)) {
if (mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
params->suspend_time = 105;
/*
* If there is more than one active interface make
@ -228,8 +221,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
}
}
if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
if (frag_passive_dwell &&
fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
/*
* P2P device scan should not be fragmented to avoid negative
* impact on P2P device discovery. Configure max_out_time to be
@ -281,8 +275,8 @@ not_bound:
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
return mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
}
static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
@ -318,22 +312,41 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
return max_ie_len;
}
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
int num_res, u8 *buf, size_t buf_size)
{
int i;
u8 *pos = buf, *end = buf + buf_size;
for (i = 0; pos < end && i < num_res; i++)
pos += snprintf(pos, end - pos, " %u", res[i].channel);
/* terminate the string in case the buffer was too short */
*(buf + buf_size - 1) = '\0';
return buf;
}
int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
u8 buf[256];
IWL_DEBUG_SCAN(mvm,
"Scan offload iteration complete: status=0x%x scanned channels=%d\n",
notif->status, notif->scanned_channels);
"Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
notif->status, notif->scanned_channels,
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
return 0;
}
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw);
@ -341,14 +354,27 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
return 0;
}
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
{
switch (status) {
case IWL_SCAN_EBS_SUCCESS:
return "successful";
case IWL_SCAN_EBS_INACTIVE:
return "inactive";
case IWL_SCAN_EBS_FAILED:
case IWL_SCAN_EBS_CHAN_NOT_FOUND:
default:
return "failed";
}
}
int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
bool ebs_successful = (scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS);
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
@ -368,13 +394,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
aborted ? "aborted" : "completed",
ebs_successful ? "successful" : "failed");
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
aborted ? "aborted" : "completed",
ebs_successful ? "successful" : "failed");
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
@ -382,14 +408,14 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
aborted ? "aborted" : "completed",
ebs_successful ? "successful" : "failed");
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
ieee80211_sched_scan_stopped(mvm->hw);
} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
aborted ? "aborted" : "completed",
ebs_successful ? "successful" : "failed");
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
ieee80211_scan_completed(mvm->hw,
@ -397,7 +423,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
mvm->last_ebs_successful = ebs_successful;
mvm->last_ebs_successful =
scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
return 0;
}
@ -463,8 +491,9 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
}
}
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
static int
iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
struct iwl_scan_offload_profile *profile;
struct iwl_scan_offload_profile_cfg *profile_cfg;
@ -545,7 +574,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
return true;
}
static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
{
int ret;
struct iwl_host_cmd cmd = {
@ -553,12 +582,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
};
u32 status;
/* Exit instantly with error when device is not ready
* to receive scan abort command or it does not perform
* scheduled scan currently */
if (!mvm->scan_status)
return -EIO;
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@ -578,73 +601,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
return ret;
}
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
{
int ret;
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
bool sched = !!(mvm->scan_status & IWL_MVM_SCAN_SCHED);
lockdep_assert_held(&mvm->mutex);
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
/* FIXME: For now we only check if no scan is set here, since
* we only support LMAC in this flow and it doesn't support
* multiple scans.
*/
if (!mvm->scan_status)
return 0;
if (iwl_mvm_is_radio_killed(mvm)) {
ret = 0;
goto out;
}
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
NULL, NULL);
ret = iwl_mvm_send_scan_offload_abort(mvm);
if (ret) {
IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
sched ? "offloaded " : "", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
goto out;
}
IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
sched ? "scheduled " : "");
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
out:
/* Clear the scan status so the next scan requests will
* succeed and mark the scan as stopping, so that the Rx
* handler doesn't do anything, as the scan was stopped from
* above. Since the rx handler won't do anything now, we have
* to release the scan reference here.
*/
if (mvm->scan_status == IWL_MVM_SCAN_REGULAR)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
if (sched) {
mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED;
if (notify)
ieee80211_sched_scan_stopped(mvm->hw);
} else {
mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR;
if (notify)
ieee80211_scan_completed(mvm->hw, true);
}
return ret;
}
static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
struct iwl_scan_req_tx_cmd *tx_cmd,
bool no_cck)
@ -775,6 +731,22 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
enum iwl_scan_priority_ext prio)
{
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
return cpu_to_le32(prio);
if (prio <= IWL_SCAN_PRIORITY_EXT_2)
return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
if (prio <= IWL_SCAN_PRIORITY_EXT_4)
return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
}
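For reference, a sketch of the fallback mapping implied by the comparisons above (assuming the extended scale runs EXT_0..EXT_7):
/* IWL_SCAN_PRIORITY_EXT_0..2 -> IWL_SCAN_PRIORITY_LOW
 * IWL_SCAN_PRIORITY_EXT_3..4 -> IWL_SCAN_PRIORITY_MEDIUM
 * IWL_SCAN_PRIORITY_EXT_5..7 -> IWL_SCAN_PRIORITY_HIGH
 *
 * so the EXT_6 used below for scan commands degrades to the old
 * hard-coded HIGH on firmware without the extended-priority API,
 * and the idle out-of-channel priority EXT_2 degrades to LOW.
 */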
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
@ -786,7 +758,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
}
static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
@ -801,19 +773,23 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
}
static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, int n_iterations)
static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int n_iterations)
{
const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
/* We can only use EBS if:
* 1. the feature is supported;
* 2. the last EBS was successful;
* 3. if only single scan, the single scan EBS API is supported.
* 3. if only single scan, the single scan EBS API is supported;
* 4. it's not a p2p find operation.
*/
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful &&
(n_iterations > 1 ||
(capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)));
fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE);
}
static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
@ -891,7 +867,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->schedule[1].iterations = params->schedule[1].iterations;
cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) {
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@ -914,32 +890,6 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
true);
if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR))
return 0;
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
return 0;
}
return iwl_mvm_scan_offload_stop(mvm, true);
}
/* UMAC scan API */
struct iwl_umac_scan_done {
struct iwl_mvm *mvm;
enum iwl_umac_scan_uid_type type;
};
static int rate_to_scan_rate_flag(unsigned int rate)
{
static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
@ -1048,68 +998,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return ret;
}
static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
{
int i;
for (i = 0; i < mvm->max_scans; i++)
if (mvm->scan_uid[i] == uid)
if (mvm->scan_uid_status[i] == status)
return i;
return i;
}
static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
{
return iwl_mvm_find_scan_uid(mvm, 0);
}
static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type)
{
int i;
for (i = 0; i < mvm->max_scans; i++)
if (mvm->scan_uid[i] & type)
return true;
return false;
}
static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type)
{
int i;
for (i = 0; i < mvm->max_scans; i++)
if (mvm->scan_uid[i] & type)
return i;
return i;
}
static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type)
{
u32 uid;
/* make sure exactly one bit is on in scan type */
WARN_ON(hweight8(type) != 1);
/*
* Make sure scan uids are unique. If one scan lasts long time while
* others are completing frequently, the seq number will wrap up and
* we may have more than one scan with the same uid.
*/
do {
uid = type | (mvm->scan_seq_num <<
IWL_UMAC_SCAN_UID_SEQ_OFFSET);
mvm->scan_seq_num++;
} while (iwl_mvm_find_scan_uid(mvm, uid) < mvm->max_scans);
IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
return uid;
return -ENOENT;
}
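Usage note, taken from the call sites later in this patch:
/* iwl_mvm_scan_uid_by_status(mvm, 0)                  - find a free slot
 *                                                       for a new scan
 * iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED) - find the UID of
 *                                                       a running scan of
 *                                                       that type
 * A negative return (-ENOENT) means no entry matches the status.
 */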
static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
@ -1123,12 +1020,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_scan_total_iterations(params) == 0)
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
else
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
}
static void
@ -1173,26 +1073,30 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (iwl_mvm_scan_total_iterations(params) > 1)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->scan_iter_notif_enabled)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
#endif
return flags;
}
static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params)
struct iwl_mvm_scan_params *params,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
u32 uid;
int uid;
u32 ssid_bitmap = 0;
int n_iterations = iwl_mvm_scan_total_iterations(params);
int uid_idx;
lockdep_assert_held(&mvm->mutex);
uid_idx = iwl_mvm_find_free_scan_uid(mvm);
if (uid_idx >= mvm->max_scans)
return -EBUSY;
uid = iwl_mvm_scan_uid_by_status(mvm, 0);
if (uid < 0)
return uid;
memset(cmd, 0, ksize(cmd));
cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
@ -1200,17 +1104,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
if (n_iterations == 1)
uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
else
uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
mvm->scan_uid_status[uid] = type;
mvm->scan_uid[uid_idx] = uid;
cmd->uid = cpu_to_le32(uid);
cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
if (iwl_mvm_scan_use_ebs(mvm, n_iterations))
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
@ -1222,8 +1121,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
params->n_channels, ssid_bitmap, cmd);
/* With UMAC we can have only one schedule, so use the sum of
* the iterations (with a maximum of 255).
/* With UMAC we use only one schedule for now, so use the sum
* of the iterations (with a maximum of 255).
*/
sec_part->schedule[0].iter_count =
(n_iterations > 255) ? 255 : n_iterations;
@ -1262,11 +1161,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
case IWL_MVM_SCAN_REGULAR:
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
return -EBUSY;
return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
case IWL_MVM_SCAN_SCHED:
if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
return -EBUSY;
return iwl_mvm_cancel_scan(mvm);
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
case IWL_MVM_SCAN_NETDETECT:
/* No need to stop anything for net-detect since the
* firmware is restarted anyway. This way, any sched
@ -1337,9 +1236,10 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd.id = SCAN_REQ_UMAC;
ret = iwl_mvm_scan_umac(mvm, vif, &params);
ret = iwl_mvm_scan_umac(mvm, vif, &params,
IWL_MVM_SCAN_REGULAR);
} else {
hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
ret = iwl_mvm_scan_lmac(mvm, vif, &params);
@ -1444,9 +1344,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd.id = SCAN_REQ_UMAC;
ret = iwl_mvm_scan_umac(mvm, vif, &params);
ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
} else {
hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
ret = iwl_mvm_scan_lmac(mvm, vif, &params);
@ -1478,144 +1378,118 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
/*
* Scan uid may be set to zero in case of scan abort request from above.
*/
if (uid_idx >= mvm->max_scans)
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return 0;
/* if the scan is already stopping, we don't need to notify mac80211 */
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
ieee80211_scan_completed(mvm->hw, aborted);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
IWL_DEBUG_SCAN(mvm,
"Scan completed, uid %u type %s, status %s, EBS status %s\n",
uid, sched ? "sched" : "regular",
"Scan completed, uid %u type %u, status %s, EBS status %s\n",
uid, mvm->scan_uid_status[uid],
notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
"success" : "failed");
iwl_mvm_ebs_status_str(notif->ebs_status));
if (notif->ebs_status)
if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
mvm->scan_uid[uid_idx] = 0;
if (!sched) {
ieee80211_scan_completed(mvm->hw,
notif->status ==
IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
} else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
ieee80211_sched_scan_stopped(mvm->hw);
} else {
IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
}
mvm->scan_uid_status[uid] = 0;
return 0;
}
static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_umac_scan_done *scan_done = data;
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
u8 buf[256];
if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
return false;
if (uid_idx >= scan_done->mvm->max_scans)
return false;
/*
* Clear scan uid of scans that was aborted from above and completed
* in FW so the RX handler does nothing. Set last_ebs_successful here if
* needed.
*/
scan_done->mvm->scan_uid[uid_idx] = 0;
if (notif->ebs_status)
scan_done->mvm->last_ebs_successful = false;
return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
IWL_DEBUG_SCAN(mvm,
"UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
notif->status, notif->scanned_channels,
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
return 0;
}
static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
struct iwl_umac_scan_abort cmd = {
.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
sizeof(struct iwl_mvm_umac_cmd_hdr)),
.uid = cpu_to_le32(uid),
};
int uid, ret;
lockdep_assert_held(&mvm->mutex);
/* We should always get a valid index here, because we already
* checked that this type of scan was running in the generic
* code.
*/
uid = iwl_mvm_scan_uid_by_status(mvm, type);
if (WARN_ON_ONCE(uid < 0))
return uid;
cmd.uid = cpu_to_le32(uid);
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
return ret;
}
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type, bool notify)
static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
struct iwl_umac_scan_done scan_done = {
.mvm = mvm,
.type = type,
};
int i, ret = -EIO;
static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
SCAN_OFFLOAD_COMPLETE, };
int ret;
lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
iwl_scan_umac_done_check, &scan_done);
NULL, NULL);
IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
for (i = 0; i < mvm->max_scans; i++) {
if (mvm->scan_uid[i] & type) {
int err;
if (iwl_mvm_is_radio_killed(mvm) &&
(type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
break;
}
err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
if (!err)
ret = 0;
}
}
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
ret = iwl_mvm_umac_scan_abort(mvm, type);
else
ret = iwl_mvm_lmac_scan_abort(mvm);
if (ret) {
IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
}
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
if (ret)
return ret;
if (notify) {
if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
ieee80211_sched_scan_stopped(mvm->hw);
if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
}
return ret;
}
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return sizeof(struct iwl_scan_req_umac) +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels +
@ -1633,19 +1507,18 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
*/
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
{
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
u32 uid, i;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
int uid, i;
uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
if (uid < mvm->max_scans) {
uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
if (uid >= 0) {
ieee80211_scan_completed(mvm->hw, true);
mvm->scan_uid[uid] = 0;
mvm->scan_uid_status[uid] = 0;
}
uid = iwl_mvm_find_first_scan(mvm,
IWL_UMAC_SCAN_UID_SCHED_SCAN);
if (uid < mvm->max_scans && !mvm->restart_fw) {
uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
if (uid >= 0 && !mvm->restart_fw) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->scan_uid[uid] = 0;
mvm->scan_uid_status[uid] = 0;
}
/* We shouldn't have any UIDs still set. Loop over all the
@ -1653,10 +1526,10 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
* any is found.
*/
for (i = 0; i < mvm->max_scans; i++) {
if (WARN_ONCE(mvm->scan_uid[i],
"UMAC scan UID %d was not cleaned\n",
mvm->scan_uid[i]))
mvm->scan_uid[i] = 0;
if (WARN_ONCE(mvm->scan_uid_status[i],
"UMAC scan UID %d status was not cleaned\n",
i))
mvm->scan_uid_status[i] = 0;
}
} else {
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
@ -1670,3 +1543,40 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
ieee80211_sched_scan_stopped(mvm->hw);
}
}
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
{
int ret;
if (!(mvm->scan_status & type))
return 0;
if (iwl_mvm_is_radio_killed(mvm)) {
ret = 0;
goto out;
}
ret = iwl_mvm_scan_stop_wait(mvm, type);
if (!ret)
mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
out:
/* Clear the scan status so the next scan requests will
* succeed and mark the scan as stopping, so that the Rx
* handler doesn't do anything, as the scan was stopped from
* above.
*/
mvm->scan_status &= ~type;
if (type == IWL_MVM_SCAN_REGULAR) {
/* Since the rx handler won't do anything now, we have
* to release the scan reference here.
*/
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
if (notify)
ieee80211_scan_completed(mvm->hw, true);
} else if (notify) {
ieee80211_sched_scan_stopped(mvm->hw);
}
return ret;
}

View file

@ -5,8 +5,8 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,8 +31,8 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
buf_size, ssn, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
return -EIO;
iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
buf_size, ssn, wdg_timeout);
/*
* Even though in theory the peer could have different
* aggregation reorder buffer sizes for different sessions,

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
}
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
queues |= BIT(mvm->aux_queue);
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
}
synchronize_net();
@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
te_data->running = true;
iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
} else {
IWL_DEBUG_TE(mvm,
@ -794,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_time_event_data *te_data;
bool is_p2p = false;
lockdep_assert_held(&mvm->mutex);
mvmvif = NULL;
spin_lock_bh(&mvm->time_event_lock);
/*
@ -818,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
}
}
/*
* Iterate over the list of aux roc time events and find the time
* event that is associated with a BSS interface.
* This assumes that a BSS interface can have only a single time
* event at any given time and this time event corresponds to a ROC
* request
/* There can be at most one AUX ROC time event; we just use the
* list to simplify/unify the code. Remove it if it exists.
*/
list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
struct iwl_mvm_time_event_data,
list);
if (te_data)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
goto remove_te;
}
remove_te:
spin_unlock_bh(&mvm->time_event_lock);

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -70,6 +70,30 @@
#include "mvm.h"
#include "sta.h"
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
u16 tid, u16 ssn)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
return;
if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
return;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
"BAR sent to %pM, tid %d, ssn %d",
addr, tid, ssn);
}
/*
* Sets most of the Tx cmd's fields
*/
@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
u16 ssn = le16_to_cpu(bar->start_seq_num);
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
tx_cmd->tid_tspec = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
ssn);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@ -144,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
if ((mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
ieee80211_action_contains_tpc(skb))
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

View file

@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
struct iwl_error_event_table table;
u32 base;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
iwl_mvm_dump_nic_error_log_old(mvm);
return;
}

View file

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
iwl_enable_rfkill_int(trans);
/*
* On suspend, ict is disabled, and the interrupt mask
* gets cleared. Reconfigure them both in case of d0i3
* image. Otherwise, only enable rfkill interrupt (in
* order to keep track of the rfkill status)
*/
if (trans->wowlan_d0i3) {
iwl_pcie_reset_ict(trans);
iwl_enable_interrupts(trans);
} else {
iwl_enable_rfkill_int(trans);
}
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);

View file

@ -44,6 +44,15 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
/*
* RX related structures and functions
*/
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
#define RX_LOW_WATERMARK 8
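A quick check of the sizing arithmetic behind these defines (my derivation, not stated in the patch):
/* A queue posts an allocation request once RX_POST_REQ_ALLOC (2) RBDs
 * are used, and each request returns RX_CLAIM_REQ_ALLOC (8) pages, so
 * up to 8 - 2 = 6 RBDs per queue can be without pages at any time.
 * With RX_NUM_QUEUES = 1:
 *
 *   RX_POOL_SIZE = (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * 1 = 6
 *
 * exactly the worst-case number of missing pages the allocator must
 * cover from its own pool.
 */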
struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
@ -77,29 +86,29 @@ struct isr_statistics {
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @pool:
* @queue:
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
* @used_count: Number of RBDs handed to the allocator to use for allocation
* @write_actual:
* @rx_free: list of free SKBs for use
* @rx_used: List of Rx buffers with no SKB
* @rx_free: list of RBDs with allocated RB ready for use
* @rx_used: list of RBDs with no RB attached
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
* @pool: initial pool of iwl_rx_mem_buffer for the queue
* @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
@ -107,6 +116,32 @@ struct iwl_rxq {
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
/**
* struct iwl_rb_allocator - Rx allocator
* @pool: initial pool of allocator
* @req_pending: number of requests the allocator has not processed yet
* @req_ready: number of requests honored and ready for claiming
* @rbd_allocated: RBDs with pages allocated and ready to be handed to
* the queue. This is a list of &struct iwl_rx_mem_buffer
* @rbd_empty: RBDs with no page attached for allocator use. This is a list
* of &struct iwl_rx_mem_buffer
* @lock: protects the rbd_allocated and rbd_empty lists
* @alloc_wq: work queue for background calls
* @rx_alloc: work struct for background calls
*/
struct iwl_rb_allocator {
struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
atomic_t req_pending;
atomic_t req_ready;
struct list_head rbd_allocated;
struct list_head rbd_empty;
spinlock_t lock;
struct workqueue_struct *alloc_wq;
struct work_struct rx_alloc;
};
struct iwl_dma_ptr {
@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_replenish: work that will be called when buffers need to be allocated
* @rba: allocator for RX replenishing
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
struct work_struct rx_replenish;
struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;

View file

@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -74,16 +74,29 @@
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
* to replenish the iwl->rxq->rx_free.
* + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
* iwl->rxq is replenished and the READ INDEX is updated (updating the
* 'processed' and 'read' driver indexes as well)
* + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
* When the interrupt handler is called, the request is processed.
* The page is either stolen - transferred to the upper layer
* or reused - added immediately to the iwl->rxq->rx_free list.
* + When the page is stolen - the driver updates the matching queue's used
* count, detaches the RBD and transfers it to the queue used list.
* When there are two used RBDs - they are transferred to the allocator empty
* list. Work is then scheduled for the allocator to start allocating
* eight buffers.
* When another 6 RBDs are used - they too are transferred to the allocator
* empty list and the driver tries to claim the pre-allocated buffers and
* add them to iwl->rxq->rx_free. If that fails - it keeps trying to claim
* them until they are ready.
* When there are 8+ buffers in the free list - either from allocation or from
* 8 reused unstolen pages - restock is called to update the FW and indexes.
* + In order to make sure the allocator always has RBDs to use for allocation
* the allocator has an initial pool of size num_queues*(8-2) - the
* maximum number of RBDs missing per allocation request (a request is
* posted with 2 empty RBDs; there is no guarantee when the other 6 RBDs
* are supplied). The queues supply the recycling of the rest of the RBDs.
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
* + The Host/Firmware iwl->rxq is replenished at irq thread time from the
* rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
* + If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
@ -92,18 +105,32 @@
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
* iwl_pcie_rxq_restock
* iwl_pcie_rxq_restock.
* Used only during initialization.
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
* the WRITE index. If insufficient rx_free buffers
* are available, schedules iwl_pcie_rx_replenish
* the WRITE index.
* iwl_pcie_rx_allocator() Background work for allocating pages.
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
* Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
*
* RBD life-cycle:
*
* Init:
* rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
*
* Regular Receive interrupt:
* Page Stolen:
* rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
* allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
* Page not Stolen:
* rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
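As a standalone sketch, the request/claim counter protocol described above can be written with plain C11 atomics (names and structure are mine; the real driver uses the kernel's atomic_t plus a high-priority workqueue):
#include <stdatomic.h>

static atomic_int req_pending;	/* requests posted by the queue */
static atomic_int req_ready;	/* requests completed by the allocator */

static void queue_post_request(void)
{
	atomic_fetch_add(&req_pending, 1);	/* then kick the worker */
}

static void allocator_work(void)
{
	while (atomic_load(&req_pending)) {
		/* ... allocate RX_CLAIM_REQ_ALLOC pages here ... */
		atomic_fetch_sub(&req_pending, 1);
		atomic_fetch_add(&req_ready, 1);
	}
}

static int queue_claim_request(void)
{
	/* guarded decrement: only succeed if a request was ready */
	if (atomic_fetch_sub(&req_ready, 1) <= 0) {
		atomic_fetch_add(&req_ready, 1);	/* undo */
		return -1;				/* try again later */
	}
	return 0;	/* caller may now move 8 RBDs to rx_free */
}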
@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->free_count--;
}
spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
@ -254,6 +277,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
}
}
/*
* iwl_pcie_rx_alloc_page - allocates and returns a page.
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct page *page;
gfp_t gfp_mask = GFP_KERNEL;
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
trans_pcie->rx_page_order);
/* Issue an error if the hardware has consumed more than half
* of its free buffer list and we don't have enough
* pre-allocated buffers.
*/
if (rxq->free_count <= RX_LOW_WATERMARK &&
iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
net_ratelimit())
IWL_CRIT(trans,
"Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
rxq->free_count);
return NULL;
}
return page;
}
/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
gfp_t gfp_mask = priority;
while (1) {
spin_lock(&rxq->lock);
@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
}
spin_unlock(&rxq->lock);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, "
"order: %d\n",
trans_pcie->rx_page_order);
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
net_ratelimit())
IWL_CRIT(trans, "Failed to alloc_pages with %s."
"Only %u free buffers remaining.\n",
priority == GFP_ATOMIC ?
"GFP_ATOMIC" : "GFP_KERNEL",
rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
page = iwl_pcie_rx_alloc_page(trans);
if (!page)
return;
}
spin_lock(&rxq->lock);
@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
lockdep_assert_held(&rxq->lock);
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
for (i = 0; i < RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@ -372,32 +410,144 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* When moving to rx_free a page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except for during initialization)
* This is called only during initialization
*/
static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_alloc_rbs(trans);
iwl_pcie_rxq_restock(trans);
}
static void iwl_pcie_rx_replenish_work(struct work_struct *data)
/*
* iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
*
* Allocates 8 pages for each received request.
* Called as a scheduled work item.
*/
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
while (atomic_read(&rba->req_pending)) {
int i;
struct list_head local_empty;
struct list_head local_allocated;
INIT_LIST_HEAD(&local_allocated);
spin_lock(&rba->lock);
/* swap out the entire rba->rbd_empty to a local list */
list_replace_init(&rba->rbd_empty, &local_empty);
spin_unlock(&rba->lock);
for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
struct page *page;
/* List should never be empty - each reused RBD is
* returned to the list, and the initial pool covers any
* possible gap between the time the page is allocated
* and the time the RBD is added.
*/
BUG_ON(list_empty(&local_empty));
/* Get the first rxb from the rbd list */
rxb = list_first_entry(&local_empty,
struct iwl_rx_mem_buffer, list);
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
page = iwl_pcie_rx_alloc_page(trans);
if (!page)
continue;
rxb->page = page;
/* Get physical address of the RB */
rxb->page_dma = dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
continue;
}
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
/* move the allocated entry to the out list */
list_move(&rxb->list, &local_allocated);
i++;
}
spin_lock(&rba->lock);
/* add the allocated rbds to the allocator allocated list */
list_splice_tail(&local_allocated, &rba->rbd_allocated);
/* add the unused rbds back to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
spin_unlock(&rba->lock);
atomic_dec(&rba->req_pending);
atomic_inc(&rba->req_ready);
}
}
/*
* iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
*
* Called by the queue when it has posted an allocation request and
* has freed 8 RBDs in order to restock itself.
*/
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
struct iwl_rx_mem_buffer
*out[RX_CLAIM_REQ_ALLOC])
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
if (atomic_dec_return(&rba->req_ready) < 0) {
atomic_inc(&rba->req_ready);
IWL_DEBUG_RX(trans,
"Allocation request not ready, pending requests = %d\n",
atomic_read(&rba->req_pending));
return -ENOMEM;
}
spin_lock(&rba->lock);
for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
/* Get next free Rx buffer, remove it from free list */
out[i] = list_first_entry(&rba->rbd_allocated,
struct iwl_rx_mem_buffer, list);
list_del(&out[i]->list);
}
spin_unlock(&rba->lock);
return 0;
}
static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
struct iwl_rb_allocator *rba_p =
container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
container_of(rba_p, struct iwl_trans_pcie, rba);
iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
spin_lock_init(&rba->lock);
if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
rxq->used_count = 0;
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
for (i = 0; i < RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
int i;
lockdep_assert_held(&rba->lock);
INIT_LIST_HEAD(&rba->rbd_allocated);
INIT_LIST_HEAD(&rba->rbd_empty);
for (i = 0; i < RX_POOL_SIZE; i++)
list_add(&rba->pool[i].list, &rba->rbd_empty);
}
static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
lockdep_assert_held(&rba->lock);
for (i = 0; i < RX_POOL_SIZE; i++) {
if (!rba->pool[i].page)
continue;
dma_unmap_page(trans->dev, rba->pool[i].page_dma,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
rba->pool[i].page = NULL;
}
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;
if (!rxq->bd) {
@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (err)
return err;
}
if (!rba->alloc_wq)
rba->alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rx_free_rba(trans);
iwl_pcie_rx_init_rba(rba);
spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
cancel_work_sync(&trans_pcie->rx_replenish);
cancel_work_sync(&rba->rx_alloc);
if (rba->alloc_wq) {
destroy_workqueue(rba->alloc_wq);
rba->alloc_wq = NULL;
}
spin_lock(&rba->lock);
iwl_pcie_rx_free_rba(trans);
spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts = NULL;
}
/*
* iwl_pcie_rx_reuse_rbd - Recycle used RBDs
*
* Called when an RBD can be reused. The RBD is transferred to the allocator.
* When there are 2 empty RBDs - a request for allocation is posted
*/
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb,
struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
/* Count the used RBDs */
rxq->used_count++;
/* Move the RBD to the used list; it will be moved to the allocator
* in batches before claiming or posting a request */
list_add_tail(&rxb->list, &rxq->rx_used);
/* If we have RX_POST_REQ_ALLOC new released rx buffers -
* issue a request to the allocator. Modulo RX_CLAIM_REQ_ALLOC is
* used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
* earlier but still need to post another request.
*/
if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
/* Move the 2 RBDs to the allocator ownership.
* Allocator has another 6 from the pool for the request completion */
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);
atomic_inc(&rba->req_pending);
queue_work(rba->alloc_wq, &rba->rx_alloc);
}
}
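A worked trace of the posting condition (illustrative, using the constants defined earlier):
/* With RX_POST_REQ_ALLOC = 2 and RX_CLAIM_REQ_ALLOC = 8, the test
 *
 *   used_count % 8 == 2
 *
 * fires at used_count = 2, 10, 18, ... used_count only shrinks when a
 * claim of 8 buffers succeeds, so after a failed claim it keeps growing
 * and the modulo still posts the follow-up request at the next boundary,
 * where a plain "used_count == RX_POST_REQ_ALLOC" check would not.
 */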
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
list_add_tail(&rxb->list, &rxq->rx_used);
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
}
/*
@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 r, i;
u8 fill_rx = 0;
u32 count = 8;
int total_empty;
u32 r, i, j;
restart:
spin_lock(&rxq->lock);
@ -720,14 +957,6 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
/* calculate total frames need to be restock after handling RX */
total_empty = r - rxq->write_actual;
if (total_empty < 0)
total_empty += RX_QUEUE_SIZE;
if (total_empty > (RX_QUEUE_SIZE / 2))
fill_rx = 1;
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
@ -739,29 +968,48 @@ restart:
iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
/* If there are a lot of unused frames,
* restock the Rx queue so ucode wont assert. */
if (fill_rx) {
count++;
if (count >= 8) {
rxq->read = i;
spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
goto restart;
/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
* try to claim the pre-allocated buffers from the allocator */
if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
/* Add the remaining 6 empty RBDs for allocator use */
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);
/* If not ready - continue, will try to reclaim later.
* No need to reschedule work - allocator exits only on
* success */
if (!iwl_pcie_rx_allocator_get(trans, out)) {
/* If success - then RX_CLAIM_REQ_ALLOC
* buffers were retrieved and should be added
* to free list */
rxq->used_count -= RX_CLAIM_REQ_ALLOC;
for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
list_add_tail(&out[j]->list,
&rxq->rx_free);
rxq->free_count++;
}
}
}
/* handle restock for two cases:
* - we just pulled buffers from the allocator
* - we have 8+ unstolen pages accumulated */
if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
rxq->read = i;
spin_unlock(&rxq->lock);
iwl_pcie_rxq_restock(trans);
goto restart;
}
}
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
if (fill_rx)
iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
@ -775,6 +1023,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
!trans->cfg->apmg_not_supported &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &

View file

@ -182,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (!trans->cfg->apmg_not_supported)
return;
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@ -315,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* bits do not disable clocks. This preserves any hardware
* bits already set by default in "CLK_CTRL_REG" after reset.
*/
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
if (!trans->cfg->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_EN_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
@ -515,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
iwl_pcie_set_pwr(trans, false);
iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@ -973,12 +975,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
return ret;
/* load to FW the binary sections of CPU2 */
ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
&first_ucode_section);
if (ret)
return ret;
return 0;
return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
&first_ucode_section);
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
@ -1067,7 +1065,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
if (!trans->cfg->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
@ -1364,14 +1362,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);
if (trans_pcie->napi.poll)
netif_napi_del(&trans_pcie->napi);
iwl_pcie_free_fw_monitor(trans);
kfree(trans);
iwl_trans_free(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
@ -2464,18 +2461,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
u16 pci_cmd;
int err;
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
if (!trans) {
err = -ENOMEM;
goto out;
}
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans->ops = &trans_ops_pcie;
trans->cfg = cfg;
trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
@ -2599,25 +2591,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_headroom = 0;
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
sizeof(struct iwl_device_cmd)
+ trans->dev_cmd_headroom,
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);
if (!trans->dev_cmd_pool) {
err = -ENOMEM;
goto out_pci_disable_msi;
}
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
goto out_pci_disable_msi;
err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
@@ -2634,8 +2609,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
out_free_cmd_pool:
kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
pci_disable_msi(pdev);
out_pci_release_regions:
@@ -2643,7 +2616,6 @@ out_pci_release_regions:
out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
kfree(trans);
out:
iwl_trans_free(trans);
return ERR_PTR(err);
}


@@ -1049,8 +1049,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,


@@ -0,0 +1,10 @@
menuconfig WL_MEDIATEK
bool "Mediatek Wireless LAN support"
---help---
Enable community drivers for MediaTek WiFi devices.
Those drivers make use of the Linux mac80211 stack.
if WL_MEDIATEK
source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
endif # WL_MEDIATEK


@@ -0,0 +1 @@
obj-$(CONFIG_MT7601U) += mt7601u/


@@ -0,0 +1,6 @@
config MT7601U
tristate "MediaTek MT7601U (USB) support"
depends on MAC80211
depends on USB
---help---
This adds support for MT7601U-based wireless USB dongles.
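As a usage note (an illustrative sketch, not part of this commit): with the two Kconfig entries above, the new driver is enabled by turning on the menu symbol and selecting the module, e.g.:

CONFIG_WL_MEDIATEK=y
CONFIG_MT7601U=m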


@@ -0,0 +1,9 @@
ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_MT7601U) += mt7601u.o
mt7601u-objs = \
usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
mac.o util.o debugfs.o tx.o
CFLAGS_trace.o := -I$(src)


@@ -0,0 +1,78 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mt7601u.h"
int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
{
int i = 100;
u32 val;
do {
if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
return -EIO;
val = mt7601u_rr(dev, MT_MAC_CSR0);
if (val && ~val)
return 0;
udelay(10);
} while (i--);
return -EIO;
}
bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
int timeout)
{
u32 cur;
timeout /= 10;
do {
if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
return false;
cur = mt7601u_rr(dev, offset) & mask;
if (cur == val)
return true;
udelay(10);
} while (timeout-- > 0);
dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
return false;
}
bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
int timeout)
{
u32 cur;
timeout /= 10;
do {
if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
return false;
cur = mt7601u_rr(dev, offset) & mask;
if (cur == val)
return true;
msleep(10);
} while (timeout-- > 0);
dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
return false;
}
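/* Usage sketch (illustrative, not part of this commit; the helper name is
 * hypothetical): the poll helpers above are how the rest of the driver
 * waits on hardware bits, e.g. waiting up to ~1000 us for an efuse read
 * kicked via MT_EFUSE_CTRL to complete, as eeprom.c does:
 */
static int example_wait_efuse_idle(struct mt7601u_dev *dev)
{
	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
		return -ETIMEDOUT;
	return 0;
}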


@@ -0,0 +1,172 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include "mt7601u.h"
#include "eeprom.h"
static int
mt76_reg_set(void *data, u64 val)
{
struct mt7601u_dev *dev = data;
mt76_wr(dev, dev->debugfs_reg, val);
return 0;
}
static int
mt76_reg_get(void *data, u64 *val)
{
struct mt7601u_dev *dev = data;
*val = mt76_rr(dev, dev->debugfs_reg);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt7601u_dev *dev = file->private;
int i, j;
#define stat_printf(grp, off, name) \
seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
stat_printf(rx_stat, 0, rx_crc_err);
stat_printf(rx_stat, 1, rx_phy_err);
stat_printf(rx_stat, 2, rx_false_cca);
stat_printf(rx_stat, 3, rx_plcp_err);
stat_printf(rx_stat, 4, rx_fifo_overflow);
stat_printf(rx_stat, 5, rx_duplicate);
stat_printf(tx_stat, 0, tx_fail_cnt);
stat_printf(tx_stat, 1, tx_bcn_cnt);
stat_printf(tx_stat, 2, tx_success);
stat_printf(tx_stat, 3, tx_retransmit);
stat_printf(tx_stat, 4, tx_zero_len);
stat_printf(tx_stat, 5, tx_underflow);
stat_printf(aggr_stat, 0, non_aggr_tx);
stat_printf(aggr_stat, 1, aggr_tx);
stat_printf(zero_len_del, 0, tx_zero_len_del);
stat_printf(zero_len_del, 1, rx_zero_len_del);
#undef stat_printf
seq_puts(file, "Aggregations stats:\n");
for (i = 0; i < 4; i++) {
for (j = 0; j < 8; j++)
seq_printf(file, "%08llx ",
dev->stats.aggr_n[i * 8 + j]);
seq_putc(file, '\n');
}
seq_printf(file, "recent average AMPDU len: %d\n",
atomic_read(&dev->avg_ampdu_len));
return 0;
}
static int
mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
{
return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
}
static const struct file_operations fops_ampdu_stat = {
.open = mt7601u_ampdu_stat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int
mt7601u_eeprom_param_read(struct seq_file *file, void *data)
{
struct mt7601u_dev *dev = file->private;
struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
struct tssi_data *td = &dev->ee->tssi_data;
int i;
seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
seq_printf(file, "RSSI offset: %hhx %hhx\n",
dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
dev->ee->reg.start + dev->ee->reg.num - 1);
seq_puts(file, "Per rate power:\n");
for (i = 0; i < 2; i++)
seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
for (i = 0; i < 4; i++)
seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
for (i = 0; i < 4; i++)
seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
seq_puts(file, "Per channel power:\n");
for (i = 0; i < 7; i++)
seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n",
i * 2 + 1, dev->ee->chan_pwr[i * 2],
i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
if (!dev->ee->tssi_enabled)
return 0;
seq_puts(file, "TSSI:\n");
seq_printf(file, "\t slope:%02hhx\n", td->slope);
seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
td->offset[0], td->offset[1], td->offset[2]);
seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
return 0;
}
static int
mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
{
return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
}
static const struct file_operations fops_eeprom_param = {
.open = mt7601u_eeprom_param_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void mt7601u_init_debugfs(struct mt7601u_dev *dev)
{
struct dentry *dir;
dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
if (!dir)
return;
debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp);
debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode);
debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
&fops_regval);
debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
&fops_eeprom_param);
}
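/* Editor's note (assumed paths, not from this commit): the directory is
 * created under hw->wiphy->debugfsdir, so the files above typically show
 * up as /sys/kernel/debug/ieee80211/phyN/mt7601u/{temperature, temp_mode,
 * regidx, regval, ampdu_stat, eeprom_param}; "regval" reads back the
 * register selected through "regidx", formatted as 0x%08llx.
 */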

Просмотреть файл

@ -0,0 +1,533 @@
/*
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
struct mt7601u_dma_buf_rx *e, gfp_t gfp);
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
unsigned int hdrlen;
if (unlikely(len < 10))
return 0;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (unlikely(hdrlen > len))
return 0;
return hdrlen;
}
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
u8 *data, u32 seg_len)
{
struct sk_buff *skb;
u32 true_len;
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
seg_len -= 2;
skb = alloc_skb(seg_len, GFP_ATOMIC);
if (!skb)
return NULL;
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
memcpy(skb_put(skb, hdr_len), data, hdr_len);
data += hdr_len + 2;
seg_len -= hdr_len;
}
memcpy(skb_put(skb, seg_len), data, seg_len);
true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
skb_trim(skb, true_len);
return skb;
}
static struct sk_buff *
mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
struct mt7601u_rxwi *rxwi, void *data,
u32 seg_len, u32 truesize, struct page *p)
{
unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
unsigned int true_len, copy, frag;
struct sk_buff *skb;
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return NULL;
true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
memcpy(skb_put(skb, hdr_len), data, hdr_len);
data += hdr_len + 2;
true_len -= hdr_len;
hdr_len = 0;
}
copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
frag = true_len - copy;
memcpy(skb_put(skb, copy), data, copy);
data += copy;
if (frag) {
skb_add_rx_frag(skb, 0, p, data - page_address(p),
frag, truesize);
get_page(p);
}
return skb;
}
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
u32 seg_len, struct page *p, bool paged)
{
struct sk_buff *skb;
struct mt7601u_rxwi *rxwi;
u32 fce_info, truesize = seg_len;
/* The DMA_INFO field at the beginning of the segment contains only some
 * of the information; we need to read the FCE descriptor from the end.
 */
fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
seg_len -= MT_FCE_INFO_LEN;
data += MT_DMA_HDR_LEN;
seg_len -= MT_DMA_HDR_LEN;
rxwi = (struct mt7601u_rxwi *) data;
data += sizeof(struct mt7601u_rxwi);
seg_len -= sizeof(struct mt7601u_rxwi);
if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
trace_mt_rx(dev, rxwi, fce_info);
if (paged)
skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
truesize, p);
else
skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
if (!skb)
return;
ieee80211_rx_ni(dev->hw, skb);
}
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
u16 dma_len = get_unaligned_le16(data);
if (data_len < min_seg_len ||
WARN_ON(!dma_len) ||
WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
WARN_ON(dma_len & 0x3))
return 0;
return MT_DMA_HDRS + dma_len;
}
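/* Worked example (illustrative value): if the next segment starts with
 * dma_len == 0x40, it occupies MT_DMA_HDRS + 0x40 == 0x48 bytes, so the
 * caller advances 0x48 bytes to the next 16-bit length field. A zero,
 * oversized or non-4-byte-aligned dma_len returns 0 and ends the walk in
 * mt7601u_rx_process_entry() below.
 */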
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
u32 seg_len, data_len = e->urb->actual_length;
u8 *data = page_address(e->p);
struct page *new_p = NULL;
bool paged = true;
int cnt = 0;
if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
return;
/* Copy if there is very little data in the buffer. */
if (data_len < 512) {
paged = false;
} else {
new_p = dev_alloc_pages(MT_RX_ORDER);
if (!new_p)
paged = false;
}
while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);
data_len -= seg_len;
data += seg_len;
cnt++;
}
if (cnt > 1)
trace_mt_rx_dma_aggr(dev, cnt, paged);
if (paged) {
/* we have one extra ref from the allocator */
__free_pages(e->p, MT_RX_ORDER);
e->p = new_p;
}
}
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
struct mt7601u_rx_queue *q = &dev->rx_q;
struct mt7601u_dma_buf_rx *buf = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->rx_lock, flags);
if (!q->pending)
goto out;
buf = &q->e[q->start];
q->pending--;
q->start = (q->start + 1) % q->entries;
out:
spin_unlock_irqrestore(&dev->rx_lock, flags);
return buf;
}
static void mt7601u_complete_rx(struct urb *urb)
{
struct mt7601u_dev *dev = urb->context;
struct mt7601u_rx_queue *q = &dev->rx_q;
unsigned long flags;
spin_lock_irqsave(&dev->rx_lock, flags);
if (mt7601u_urb_has_error(urb))
dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
goto out;
q->end = (q->end + 1) % q->entries;
q->pending++;
tasklet_schedule(&dev->rx_tasklet);
out:
spin_unlock_irqrestore(&dev->rx_lock, flags);
}
static void mt7601u_rx_tasklet(unsigned long data)
{
struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
struct mt7601u_dma_buf_rx *e;
while ((e = mt7601u_rx_get_pending_entry(dev))) {
if (e->urb->status)
continue;
mt7601u_rx_process_entry(dev, e);
mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
}
}
static void mt7601u_complete_tx(struct urb *urb)
{
struct mt7601u_tx_queue *q = urb->context;
struct mt7601u_dev *dev = q->dev;
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&dev->tx_lock, flags);
if (mt7601u_urb_has_error(urb))
dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
goto out;
skb = q->e[q->start].skb;
trace_mt_tx_dma_done(dev, skb);
mt7601u_tx_status(dev, skb);
if (q->used == q->entries - q->entries / 8)
ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
q->start = (q->start + 1) % q->entries;
q->used--;
if (urb->status)
goto out;
set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
queue_delayed_work(dev->stat_wq, &dev->stat_work,
msecs_to_jiffies(10));
out:
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
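/* Note on the wake condition above (queue depth illustrative): TX queues
 * are stopped in mt7601u_dma_submit_tx() once used == entries and woken
 * here only after draining to entries - entries/8 (e.g. 28 of 32), which
 * gives hysteresis so the mac80211 queue isn't toggled on every single
 * completion.
 */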
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
struct sk_buff *skb, u8 ep)
{
struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
struct mt7601u_dma_buf_tx *e;
struct mt7601u_tx_queue *q = &dev->tx_q[ep];
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->tx_lock, flags);
if (WARN_ON(q->entries <= q->used)) {
ret = -ENOSPC;
goto out;
}
e = &q->e[q->end];
e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
if (ret) {
/* Special-handle ENODEV from TX urb submission because it will
* often be the first ENODEV we see after device is removed.
*/
if (ret == -ENODEV)
set_bit(MT7601U_STATE_REMOVED, &dev->state);
else
dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
ret);
goto out;
}
q->end = (q->end + 1) % q->entries;
q->used++;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
spin_unlock_irqrestore(&dev->tx_lock, flags);
return ret;
}
/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
/* TODO: take management packets to queue 5 */
return qid + 1;
}
/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
if (ep == 5)
return MT_QSEL_MGMT;
return MT_QSEL_EDCA;
}
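/* Mapping sketch: q2ep() places hw queues 0-3 on bulk OUT endpoints 1-4,
 * which ep2dmaq() classifies as MT_QSEL_EDCA; endpoint 5 is reserved for
 * management frames and maps to MT_QSEL_MGMT, per the TODO above.
 */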
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
struct mt76_wcid *wcid, int hw_q)
{
u8 ep = q2ep(hw_q);
u32 dma_flags;
int ret;
dma_flags = MT_TXD_PKT_INFO_80211;
if (wcid->hw_key_idx == 0xff)
dma_flags |= MT_TXD_PKT_INFO_WIV;
ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
if (ret)
return ret;
ret = mt7601u_dma_submit_tx(dev, skb, ep);
if (ret) {
ieee80211_free_txskb(dev->hw, skb);
return ret;
}
return 0;
}
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
int i;
unsigned long flags;
spin_lock_irqsave(&dev->rx_lock, flags);
for (i = 0; i < dev->rx_q.entries; i++) {
int next = dev->rx_q.end;
spin_unlock_irqrestore(&dev->rx_lock, flags);
usb_poison_urb(dev->rx_q.e[next].urb);
spin_lock_irqsave(&dev->rx_lock, flags);
}
spin_unlock_irqrestore(&dev->rx_lock, flags);
}
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
u8 *buf = page_address(e->p);
unsigned pipe;
int ret;
pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
mt7601u_complete_rx, dev);
trace_mt_submit_urb(dev, e->urb);
ret = usb_submit_urb(e->urb, gfp);
if (ret)
dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
return ret;
}
static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
int i, ret;
for (i = 0; i < dev->rx_q.entries; i++) {
ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
if (ret)
return ret;
}
return 0;
}
static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
int i;
for (i = 0; i < dev->rx_q.entries; i++) {
__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
usb_free_urb(dev->rx_q.e[i].urb);
}
}
static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
int i;
memset(&dev->rx_q, 0, sizeof(dev->rx_q));
dev->rx_q.dev = dev;
dev->rx_q.entries = N_RX_ENTRIES;
for (i = 0; i < N_RX_ENTRIES; i++) {
dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
return -ENOMEM;
}
return 0;
}
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
int i;
WARN_ON(q->used);
for (i = 0; i < q->entries; i++) {
usb_poison_urb(q->e[i].urb);
usb_free_urb(q->e[i].urb);
}
}
static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
int i;
for (i = 0; i < __MT_EP_OUT_MAX; i++)
mt7601u_free_tx_queue(&dev->tx_q[i]);
}
static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
struct mt7601u_tx_queue *q)
{
int i;
q->dev = dev;
q->entries = N_TX_ENTRIES;
for (i = 0; i < N_TX_ENTRIES; i++) {
q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (!q->e[i].urb)
return -ENOMEM;
}
return 0;
}
static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
int i;
dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
sizeof(*dev->tx_q), GFP_KERNEL);
for (i = 0; i < __MT_EP_OUT_MAX; i++)
if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
return -ENOMEM;
return 0;
}
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
int ret = -ENOMEM;
tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
ret = mt7601u_alloc_tx(dev);
if (ret)
goto err;
ret = mt7601u_alloc_rx(dev);
if (ret)
goto err;
ret = mt7601u_submit_rx(dev);
if (ret)
goto err;
return 0;
err:
mt7601u_dma_cleanup(dev);
return ret;
}
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
mt7601u_kill_rx(dev);
tasklet_kill(&dev->rx_tasklet);
mt7601u_free_rx(dev);
mt7601u_free_tx(dev);
}


@@ -0,0 +1,127 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT7601U_DMA_H
#define __MT7601U_DMA_H
#include <asm/unaligned.h>
#include <linux/skbuff.h>
#include "util.h"
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
/* Common Tx DMA descriptor fields */
#define MT_TXD_INFO_LEN GENMASK(15, 0)
#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
#define MT_TXD_INFO_TYPE GENMASK(31, 30)
enum mt76_msg_port {
WLAN_PORT,
CPU_RX_PORT,
CPU_TX_PORT,
HOST_PORT,
VIRTUAL_CPU_RX_PORT,
VIRTUAL_CPU_TX_PORT,
DISCARD,
};
enum mt76_info_type {
DMA_PACKET,
DMA_COMMAND,
};
/* Tx DMA packet specific flags */
#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
#define MT_TXD_PKT_INFO_80211 BIT(19)
#define MT_TXD_PKT_INFO_TSO BIT(20)
#define MT_TXD_PKT_INFO_CSO BIT(21)
#define MT_TXD_PKT_INFO_WIV BIT(24)
#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
MT_QSEL_EDCA,
MT_QSEL_EDCA_2,
};
/* Tx DMA MCU command specific flags */
#define MT_TXD_CMD_INFO_SEQ GENMASK(19, 16)
#define MT_TXD_CMD_INFO_TYPE GENMASK(26, 20)
static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
enum mt76_msg_port d_port,
enum mt76_info_type type, u32 flags)
{
u32 info;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
* | TXINFO | pkt/cmd | zero pad to 4B | zero |
*
* length field of TXINFO should be set to 'xfer len'.
*/
info = flags |
MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
MT76_SET(MT_TXD_INFO_TYPE, type);
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
return skb_put_padto(skb, round_up(skb->len, 4) + 4);
}
static inline int
mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
{
flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
}
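/* Worked example (illustrative values): wrapping a 60-byte frame for
 * MT_QSEL_EDCA (= 2) builds
 *   MT76_SET(MT_TXD_INFO_LEN, round_up(60, 4))   -> 0x0000003c
 *   MT76_SET(MT_TXD_PKT_INFO_QSEL, MT_QSEL_EDCA) -> 0x04000000
 *   MT76_SET(MT_TXD_INFO_D_PORT, WLAN_PORT)      -> 0x00000000
 *   MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET)       -> 0x00000000
 * i.e. info == 0x0400003c, pushed in front of the frame, after which the
 * skb is zero-padded to round_up(len, 4) + 4 bytes.
 */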
/* Common Rx DMA descriptor fields */
#define MT_RXD_INFO_LEN GENMASK(13, 0)
#define MT_RXD_INFO_PCIE_INTR BIT(24)
#define MT_RXD_INFO_QSEL GENMASK(26, 25)
#define MT_RXD_INFO_PORT GENMASK(29, 27)
#define MT_RXD_INFO_TYPE GENMASK(31, 30)
/* Rx DMA packet specific flags */
#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
/* Rx DMA MCU command specific flags */
#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
enum mt76_evt_type {
CMD_DONE,
CMD_ERROR,
CMD_RETRY,
EVENT_PWR_RSP,
EVENT_WOW_RSP,
EVENT_CARRIER_DETECT_RSP,
EVENT_DFS_DETECT_RSP,
};
#endif


@@ -0,0 +1,414 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "mt7601u.h"
#include "eeprom.h"
static bool
field_valid(u8 val)
{
return val != 0xff;
}
static s8
field_validate(u8 val)
{
if (!field_valid(val))
return 0;
return val;
}
static int
mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
enum mt7601u_eeprom_access_modes mode)
{
u32 val;
int i;
val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
MT_EFUSE_CTRL_KICK;
mt76_wr(dev, MT_EFUSE_CTRL, val);
if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
val = mt76_rr(dev, MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
/* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
* will not return valid data but it's ok.
*/
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_EFUSE_DATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
static int
mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
{
const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
u8 data[map_reads * 16];
int ret, i;
u32 start = 0, end = 0, cnt_free;
for (i = 0; i < map_reads; i++) {
ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
data + i * 16, MT_EE_PHYSICAL_READ);
if (ret)
return ret;
}
for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
if (!data[i]) {
if (!start)
start = MT_EE_USAGE_MAP_START + i;
end = MT_EE_USAGE_MAP_START + i;
}
cnt_free = end - start + 1;
if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
return -EINVAL;
}
return 0;
}
static bool
mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
{
u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
}
static void
mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
{
u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
if (!field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;
dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
!(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
dev_err(dev->dev,
"Error: this driver does not support HW RF ctrl\n");
if (!field_valid(nic_conf0 >> 8))
return;
if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
dev_err(dev->dev,
"Error: device has more than 1 RX/TX stream!\n");
}
static int
mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
{
const void *src = eeprom + MT_EE_MAC_ADDR;
ether_addr_copy(dev->macaddr, src);
if (!is_valid_ether_addr(dev->macaddr)) {
eth_random_addr(dev->macaddr);
dev_info(dev->dev,
"Invalid MAC address, using random address %pM\n",
dev->macaddr);
}
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
return 0;
}
static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
u8 *eeprom, u8 max_pwr)
{
u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
if (trgt_pwr > max_pwr || !trgt_pwr) {
dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n",
trgt_pwr);
trgt_pwr = 0x20;
}
memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
}
static void
mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
{
u32 i, val;
u8 max_pwr;
val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
if (mt7601u_has_tssi(dev, eeprom)) {
mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
return;
}
for (i = 0; i < 14; i++) {
s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
if (power > max_pwr || power < 0)
power = MT7601U_DEFAULT_TX_POWER;
dev->ee->chan_pwr[i] = power;
}
}
static void
mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
{
/* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
* - comments in rtmp_def.h are incorrect (see rt_channel.c)
*/
static const struct reg_channel_bounds chan_bounds[] = {
/* EEPROM country regions 0 - 7 */
{ 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
{ 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
/* EEPROM country regions 32 - 33 */
{ 1, 11 }, { 1, 14 }
};
u8 val = eeprom[MT_EE_COUNTRY_REGION];
int idx = -1;
if (val < 8)
idx = val;
if (val > 31 && val < 33)
idx = val - 32 + 8;
if (idx != -1)
dev_info(dev->dev,
"EEPROM country region %02hhx (channels %hhd-%hhd)\n",
val, chan_bounds[idx].start,
chan_bounds[idx].start + chan_bounds[idx].num - 1);
else
idx = 5; /* channels 1 - 14 */
dev->ee->reg = chan_bounds[idx];
/* TODO: country region 33 is special - phy should be set to B-mode
* before entering channel 14 (see sta/connect.c)
*/
}
static void
mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
{
u8 comp;
dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
if (comp & BIT(7))
dev->ee->rf_freq_off -= comp & 0x7f;
else
dev->ee->rf_freq_off += comp;
}
static void
mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
{
int i;
s8 *rssi_offset = dev->ee->rssi_offset;
for (i = 0; i < 2; i++) {
rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
dev_warn(dev->dev,
"Warning: EEPROM RSSI is invalid %02hhx\n",
rssi_offset[i]);
rssi_offset[i] = 0;
}
}
}
static void
mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
{
u32 val;
val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
}
static void
mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
{
rate->raw = s6_validate(value);
rate->bw20 = s6_to_int(value);
/* Note: vendor driver does cap the value to s6 right away */
rate->bw40 = rate->bw20 + delta;
}
static void
mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
{
struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
switch (i) {
case 0:
mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
/* Save cck bw20 for fixups of channel 14 */
dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
break;
case 1:
mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
break;
case 2:
mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
break;
}
}
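/* Unpacking sketch (illustrative value): each 32-bit EEPROM word carries
 * four per-rate-pair power bytes, lowest byte first; in case 0 a val of
 * 0xddccbbaa yields cck[0] <- 0xaa, cck[1] <- 0xbb, ofdm[0] <- 0xcc and
 * ofdm[1] <- 0xdd, with the cck bw20 values also saved for the channel 14
 * fixups.
 */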
static s8
get_delta(u8 val)
{
s8 ret;
if (!field_valid(val) || !(val & BIT(7)))
return 0;
ret = val & 0x1f;
if (ret > 8)
ret = 8;
if (val & BIT(6))
ret = -ret;
return ret;
}
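/* Worked example (illustrative value): get_delta(0xc5) -- 0xc5 is valid
 * (!= 0xff) and has BIT(7) set, the magnitude is 0xc5 & 0x1f == 5, and
 * BIT(6) set makes it negative, so the resulting bw40 delta is -5. A
 * blank eeprom byte (0xff) yields a delta of 0.
 */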
static void
mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
{
u32 val;
s8 bw40_delta;
int i;
bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
for (i = 0; i < 5; i++) {
val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
mt7601u_save_power_rate(dev, bw40_delta, val, i);
if (~val)
mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
}
mt7601u_extra_power_over_mac(dev);
}
static void
mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
{
struct tssi_data *d = &dev->ee->tssi_data;
if (!dev->ee->tssi_enabled)
return;
d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
}
int
mt7601u_eeprom_init(struct mt7601u_dev *dev)
{
u8 *eeprom;
int i, ret;
ret = mt7601u_efuse_physical_size_check(dev);
if (ret)
return ret;
dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
if (!dev->ee)
return -ENOMEM;
eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
if (ret)
goto out;
}
if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
dev_warn(dev->dev,
"Warning: unsupported EEPROM version %02hhx\n",
eeprom[MT_EE_VERSION_EE]);
dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
mt7601u_set_macaddr(dev, eeprom);
mt7601u_set_chip_cap(dev, eeprom);
mt7601u_set_channel_power(dev, eeprom);
mt7601u_set_country_reg(dev, eeprom);
mt7601u_set_rf_freq_off(dev, eeprom);
mt7601u_set_rssi_offset(dev, eeprom);
dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
mt7601u_config_tx_power_per_rate(dev, eeprom);
mt7601u_init_tssi_params(dev, eeprom);
out:
kfree(eeprom);
return ret;
}


@@ -0,0 +1,151 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT7601U_EEPROM_H
#define __MT7601U_EEPROM_H
struct mt7601u_dev;
#define MT7601U_EE_MAX_VER 0x0c
#define MT7601U_EEPROM_SIZE 256
#define MT7601U_DEFAULT_TX_POWER 6
enum mt76_eeprom_field {
MT_EE_CHIP_ID = 0x00,
MT_EE_VERSION_FAE = 0x02,
MT_EE_VERSION_EE = 0x03,
MT_EE_MAC_ADDR = 0x04,
MT_EE_NIC_CONF_0 = 0x34,
MT_EE_NIC_CONF_1 = 0x36,
MT_EE_COUNTRY_REGION = 0x39,
MT_EE_FREQ_OFFSET = 0x3a,
MT_EE_NIC_CONF_2 = 0x42,
MT_EE_LNA_GAIN = 0x44,
MT_EE_RSSI_OFFSET = 0x46,
MT_EE_TX_POWER_DELTA_BW40 = 0x50,
MT_EE_TX_POWER_OFFSET = 0x52,
MT_EE_TX_TSSI_SLOPE = 0x6e,
MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
MT_EE_TX_TSSI_OFFSET = 0x76,
MT_EE_TX_TSSI_TARGET_POWER = 0xd0,
MT_EE_REF_TEMP = 0xd1,
MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
MT_EE_TX_POWER_BYRATE_BASE = 0xde,
MT_EE_USAGE_MAP_START = 0x1e0,
MT_EE_USAGE_MAP_END = 0x1fc,
};
#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
(i) * 4)
#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
MT_EE_USAGE_MAP_START + 1)
enum mt7601u_eeprom_access_modes {
MT_EE_READ = 0,
MT_EE_PHYSICAL_READ = 1,
};
struct power_per_rate {
u8 raw; /* validated s6 value */
s8 bw20; /* sign-extended int */
s8 bw40; /* sign-extended int */
};
/* Power per rate - one value per two rates */
struct mt7601u_rate_power {
struct power_per_rate cck[2];
struct power_per_rate ofdm[4];
struct power_per_rate ht[4];
};
struct reg_channel_bounds {
u8 start;
u8 num;
};
struct mt7601u_eeprom_params {
bool tssi_enabled;
u8 rf_freq_off;
s8 rssi_offset[2];
s8 ref_temp;
s8 lna_gain;
u8 chan_pwr[14];
struct mt7601u_rate_power power_rate_table;
s8 real_cck_bw20[2];
/* TSSI stuff - only with internal TX ALC */
struct tssi_data {
int tx0_delta_offset;
u8 slope;
u8 offset[3];
} tssi_data;
struct reg_channel_bounds reg;
};
int mt7601u_eeprom_init(struct mt7601u_dev *dev);
static inline u32 s6_validate(u32 reg)
{
WARN_ON(reg & ~GENMASK(5, 0));
return reg & GENMASK(5, 0);
}
static inline int s6_to_int(u32 reg)
{
int s6;
s6 = s6_validate(reg);
if (s6 & BIT(5))
s6 -= BIT(6);
return s6;
}
static inline u32 int_to_s6(int val)
{
if (val < -0x20)
return 0x20;
if (val > 0x1f)
return 0x1f;
return val & 0x3f;
}
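/* Worked examples for the s6 (6-bit signed) helpers above:
 *   s6_to_int(0x3f) == -1    (bit 5 set: 0x3f - 0x40)
 *   s6_to_int(0x1f) == 31    (bit 5 clear)
 *   int_to_s6(-1)   == 0x3f  (two's complement truncated to 6 bits)
 *   int_to_s6(100)  == 0x1f  (clamped to the s6 maximum)
 */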
#endif


@@ -0,0 +1,625 @@
/*
* (c) Copyright 2002-2010, Ralink Technology, Inc.
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mt7601u.h"
#include "eeprom.h"
#include "trace.h"
#include "mcu.h"
#include "initvals.h"
static void
mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
{
int i;
/* Note: we don't turn off WLAN_CLK because that makes the device
* not respond properly on the probe path.
* In case anyone (PSM?) wants to use this function we can
* bring the clock stuff back and fixup the probe path.
*/
if (enable)
val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
else
val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
udelay(20);
if (enable) {
set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
} else {
clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
return;
}
for (i = 200; i; i--) {
val = mt7601u_rr(dev, MT_CMB_CTRL);
if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
break;
udelay(20);
}
/* Note: vendor driver tries to disable/enable wlan here and retry
* but the code which does it is so buggy it must have never
* triggered, so don't bother.
*/
if (!i)
dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
}
static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
{
u32 val;
mutex_lock(&dev->hw_atomic_mutex);
val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
if (reset) {
val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
udelay(20);
val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
}
}
mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
udelay(20);
mt7601u_set_wlan_state(dev, val, enable);
mutex_unlock(&dev->hw_atomic_mutex);
}
static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
{
mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
MT_MAC_SYS_CTRL_RESET_BBP));
mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
msleep(1);
mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
}
static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
{
u32 val;
val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
MT_USB_DMA_CFG_RX_BULK_EN |
MT_USB_DMA_CFG_TX_BULK_EN;
if (dev->in_max_packet == 512)
val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
mt7601u_wr(dev, MT_USB_DMA_CFG, val);
val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
mt7601u_wr(dev, MT_USB_DMA_CFG, val);
val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
mt7601u_wr(dev, MT_USB_DMA_CFG, val);
}
static int mt7601u_init_bbp(struct mt7601u_dev *dev)
{
int ret;
ret = mt7601u_wait_bbp_ready(dev);
if (ret)
return ret;
ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
ARRAY_SIZE(bbp_common_vals));
if (ret)
return ret;
return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
ARRAY_SIZE(bbp_chip_vals));
}
static void
mt76_init_beacon_offsets(struct mt7601u_dev *dev)
{
u16 base = MT_BEACON_BASE;
u32 regs[4] = {};
int i;
for (i = 0; i < 16; i++) {
u16 addr = dev->beacon_offsets[i];
regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
}
for (i = 0; i < 4; i++)
mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
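/* Worked example: with the 512-byte beacon slots set up in
 * mt7601u_init_hardware() (0xc000, 0xc200, 0xc400, 0xc600, ...),
 * (addr - base) / 64 gives 0x00, 0x08, 0x10, 0x18 for slots 0-3, so
 * regs[0] == 0x18100800 -- the same value the static mac_chip_vals table
 * in initvals.h writes to MT_BCN_OFFSET(0).
 */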
static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
{
int ret;
ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
ARRAY_SIZE(mac_common_vals));
if (ret)
return ret;
ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
if (ret)
return ret;
mt76_init_beacon_offsets(dev);
mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
return 0;
}
static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
{
u32 *vals;
int i, ret;
vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
if (!vals)
return -ENOMEM;
for (i = 0; i < N_WCIDS; i++) {
vals[i * 2] = 0xffffffff;
vals[i * 2 + 1] = 0x00ffffff;
}
ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
vals, N_WCIDS * 2);
kfree(vals);
return ret;
}
static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
{
u32 vals[4] = {};
return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
vals, ARRAY_SIZE(vals));
}
static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
{
u32 *vals;
int i, ret;
vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
if (!vals)
return -ENOMEM;
for (i = 0; i < N_WCIDS * 2; i++)
vals[i] = 1;
ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
vals, N_WCIDS * 2);
kfree(vals);
return ret;
}
static void mt7601u_reset_counters(struct mt7601u_dev *dev)
{
mt7601u_rr(dev, MT_RX_STA_CNT0);
mt7601u_rr(dev, MT_RX_STA_CNT1);
mt7601u_rr(dev, MT_RX_STA_CNT2);
mt7601u_rr(dev, MT_TX_STA_CNT0);
mt7601u_rr(dev, MT_TX_STA_CNT1);
mt7601u_rr(dev, MT_TX_STA_CNT2);
}
int mt7601u_mac_start(struct mt7601u_dev *dev)
{
mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
return -ETIMEDOUT;
dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
mt7601u_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
return -ETIMEDOUT;
return 0;
}
static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
{
int i, ok;
if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
return;
mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX);
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
/* Page count on TxQ */
i = 200;
while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
(mt76_rr(dev, 0x0a30) & 0x000000ff) ||
(mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
msleep(10);
if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
MT_MAC_SYS_CTRL_ENABLE_TX);
/* Page count on RxQ */
ok = 0;
i = 200;
while (i--) {
if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
(mt76_rr(dev, 0x0a30) & 0xffffffff) ||
(mt76_rr(dev, 0x0a34) & 0xffffffff))
ok++;
if (ok > 6)
break;
msleep(1);
}
if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
}
void mt7601u_mac_stop(struct mt7601u_dev *dev)
{
mt7601u_mac_stop_hw(dev);
flush_delayed_work(&dev->stat_work);
cancel_delayed_work_sync(&dev->stat_work);
}
static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
{
mt7601u_chip_onoff(dev, false, false);
}
int mt7601u_init_hardware(struct mt7601u_dev *dev)
{
static const u16 beacon_offsets[16] = {
/* 512 bytes per beacon */
0xc000, 0xc200, 0xc400, 0xc600,
0xc800, 0xca00, 0xcc00, 0xce00,
0xd000, 0xd200, 0xd400, 0xd600,
0xd800, 0xda00, 0xdc00, 0xde00
};
int ret;
dev->beacon_offsets = beacon_offsets;
mt7601u_chip_onoff(dev, true, false);
ret = mt7601u_wait_asic_ready(dev);
if (ret)
goto err;
ret = mt7601u_mcu_init(dev);
if (ret)
goto err;
if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
ret = -EIO;
goto err;
}
/* Wait for ASIC ready after FW load. */
ret = mt7601u_wait_asic_ready(dev);
if (ret)
goto err;
mt7601u_reset_csr_bbp(dev);
mt7601u_init_usb_dma(dev);
ret = mt7601u_mcu_cmd_init(dev);
if (ret)
goto err;
ret = mt7601u_dma_init(dev);
if (ret)
goto err_mcu;
ret = mt7601u_write_mac_initvals(dev);
if (ret)
goto err_rx;
if (!mt76_poll_msec(dev, MT_MAC_STATUS,
MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
ret = -EIO;
goto err_rx;
}
ret = mt7601u_init_bbp(dev);
if (ret)
goto err_rx;
ret = mt7601u_init_wcid_mem(dev);
if (ret)
goto err_rx;
ret = mt7601u_init_key_mem(dev);
if (ret)
goto err_rx;
ret = mt7601u_init_wcid_attr_mem(dev);
if (ret)
goto err_rx;
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX));
mt7601u_reset_counters(dev);
mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
ret = mt7601u_eeprom_init(dev);
if (ret)
goto err_rx;
ret = mt7601u_phy_init(dev);
if (ret)
goto err_rx;
mt7601u_set_rx_path(dev, 0);
mt7601u_set_tx_dac(dev, 0);
mt7601u_mac_set_ctrlch(dev, false);
mt7601u_bbp_set_ctrlch(dev, false);
mt7601u_bbp_set_bw(dev, MT_BW_20);
return 0;
err_rx:
mt7601u_dma_cleanup(dev);
err_mcu:
mt7601u_mcu_cmd_deinit(dev);
err:
mt7601u_chip_onoff(dev, false, false);
return ret;
}
void mt7601u_cleanup(struct mt7601u_dev *dev)
{
mt7601u_stop_hardware(dev);
mt7601u_dma_cleanup(dev);
mt7601u_mcu_cmd_deinit(dev);
}
struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
{
struct ieee80211_hw *hw;
struct mt7601u_dev *dev;
hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
if (!hw)
return NULL;
dev = hw->priv;
dev->dev = pdev;
dev->hw = hw;
mutex_init(&dev->vendor_req_mutex);
mutex_init(&dev->reg_atomic_mutex);
mutex_init(&dev->hw_atomic_mutex);
mutex_init(&dev->mutex);
spin_lock_init(&dev->tx_lock);
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->con_mon_lock);
atomic_set(&dev->avg_ampdu_len, 1);
dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
if (!dev->stat_wq) {
ieee80211_free_hw(hw);
return NULL;
}
return dev;
}
#define CHAN2G(_idx, _freq) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 30, \
}
static const struct ieee80211_channel mt76_channels_2ghz[] = {
CHAN2G(1, 2412),
CHAN2G(2, 2417),
CHAN2G(3, 2422),
CHAN2G(4, 2427),
CHAN2G(5, 2432),
CHAN2G(6, 2437),
CHAN2G(7, 2442),
CHAN2G(8, 2447),
CHAN2G(9, 2452),
CHAN2G(10, 2457),
CHAN2G(11, 2462),
CHAN2G(12, 2467),
CHAN2G(13, 2472),
CHAN2G(14, 2484),
};
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
}
static struct ieee80211_rate mt76_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
OFDM_RATE(0, 60),
OFDM_RATE(1, 90),
OFDM_RATE(2, 120),
OFDM_RATE(3, 180),
OFDM_RATE(4, 240),
OFDM_RATE(5, 360),
OFDM_RATE(6, 480),
OFDM_RATE(7, 540),
};
static int
mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
const struct ieee80211_channel *chan, int n_chan,
struct ieee80211_rate *rates, int n_rates)
{
struct ieee80211_sta_ht_cap *ht_cap;
void *chanlist;
int size;
size = n_chan * sizeof(*chan);
chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
if (!chanlist)
return -ENOMEM;
sband->channels = chanlist;
sband->n_channels = n_chan;
sband->bitrates = rates;
sband->n_bitrates = n_rates;
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
ht_cap->mcs.rx_mask[0] = 0xff;
ht_cap->mcs.rx_mask[4] = 0x1;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
dev->chandef.chan = &sband->channels[0];
return 0;
}
static int
mt76_init_sband_2g(struct mt7601u_dev *dev)
{
dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
GFP_KERNEL);
dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
ARRAY_SIZE(mt76_channels_2ghz));
return mt76_init_sband(dev, dev->sband_2g,
&mt76_channels_2ghz[dev->ee->reg.start - 1],
dev->ee->reg.num,
mt76_rates, ARRAY_SIZE(mt76_rates));
}
int mt7601u_register_device(struct mt7601u_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
struct wiphy *wiphy = hw->wiphy;
int ret;
/* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
 * entry no. 1 like it does in the vendor driver.
 */
dev->wcid_mask[0] |= 1;
/* init fake wcid for monitor interfaces */
dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
GFP_KERNEL);
if (!dev->mon_wcid)
return -ENOMEM;
dev->mon_wcid->idx = 0xff;
dev->mon_wcid->hw_key_idx = -1;
SET_IEEE80211_DEV(hw, dev->dev);
hw->queues = 4;
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->sta_data_size = sizeof(struct mt76_sta);
hw->vif_data_size = sizeof(struct mt76_vif);
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
ret = mt76_init_sband_2g(dev);
if (ret)
return ret;
INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
ret = ieee80211_register_hw(hw);
if (ret)
return ret;
mt7601u_init_debugfs(dev);
return 0;
}


@@ -0,0 +1,164 @@
/*
* (c) Copyright 2002-2010, Ralink Technology, Inc.
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT7601U_INITVALS_H
#define __MT7601U_INITVALS_H
static const struct mt76_reg_pair bbp_common_vals[] = {
{ 65, 0x2c },
{ 66, 0x38 },
{ 68, 0x0b },
{ 69, 0x12 },
{ 70, 0x0a },
{ 73, 0x10 },
{ 81, 0x37 },
{ 82, 0x62 },
{ 83, 0x6a },
{ 84, 0x99 },
{ 86, 0x00 },
{ 91, 0x04 },
{ 92, 0x00 },
{ 103, 0x00 },
{ 105, 0x05 },
{ 106, 0x35 },
};
static const struct mt76_reg_pair bbp_chip_vals[] = {
{ 1, 0x04 }, { 4, 0x40 }, { 20, 0x06 }, { 31, 0x08 },
/* CCK Tx Control */
{ 178, 0xff },
/* AGC/Sync controls */
{ 66, 0x14 }, { 68, 0x8b }, { 69, 0x12 }, { 70, 0x09 },
{ 73, 0x11 }, { 75, 0x60 }, { 76, 0x44 }, { 84, 0x9a },
{ 86, 0x38 }, { 91, 0x07 }, { 92, 0x02 },
/* Rx Path Controls */
{ 99, 0x50 }, { 101, 0x00 }, { 103, 0xc0 }, { 104, 0x92 },
{ 105, 0x3c }, { 106, 0x03 }, { 128, 0x12 },
/* Change RXWI content: Gain Report */
{ 142, 0x04 }, { 143, 0x37 },
/* Change RXWI content: Antenna Report */
{ 142, 0x03 }, { 143, 0x99 },
/* Calibration Index Register */
/* CCK Receiver Control */
{ 160, 0xeb }, { 161, 0xc4 }, { 162, 0x77 }, { 163, 0xf9 },
{ 164, 0x88 }, { 165, 0x80 }, { 166, 0xff }, { 167, 0xe4 },
/* Added AGC controls - these AGC/GLRT registers are accessed
* through R195 and R196.
*/
{ 195, 0x00 }, { 196, 0x00 },
{ 195, 0x01 }, { 196, 0x04 },
{ 195, 0x02 }, { 196, 0x20 },
{ 195, 0x03 }, { 196, 0x0a },
{ 195, 0x06 }, { 196, 0x16 },
{ 195, 0x07 }, { 196, 0x05 },
{ 195, 0x08 }, { 196, 0x37 },
{ 195, 0x0a }, { 196, 0x15 },
{ 195, 0x0b }, { 196, 0x17 },
{ 195, 0x0c }, { 196, 0x06 },
{ 195, 0x0d }, { 196, 0x09 },
{ 195, 0x0e }, { 196, 0x05 },
{ 195, 0x0f }, { 196, 0x09 },
{ 195, 0x10 }, { 196, 0x20 },
{ 195, 0x20 }, { 196, 0x17 },
{ 195, 0x21 }, { 196, 0x06 },
{ 195, 0x22 }, { 196, 0x09 },
{ 195, 0x23 }, { 196, 0x17 },
{ 195, 0x24 }, { 196, 0x06 },
{ 195, 0x25 }, { 196, 0x09 },
{ 195, 0x26 }, { 196, 0x17 },
{ 195, 0x27 }, { 196, 0x06 },
{ 195, 0x28 }, { 196, 0x09 },
{ 195, 0x29 }, { 196, 0x05 },
{ 195, 0x2a }, { 196, 0x09 },
{ 195, 0x80 }, { 196, 0x8b },
{ 195, 0x81 }, { 196, 0x12 },
{ 195, 0x82 }, { 196, 0x09 },
{ 195, 0x83 }, { 196, 0x17 },
{ 195, 0x84 }, { 196, 0x11 },
{ 195, 0x85 }, { 196, 0x00 },
{ 195, 0x86 }, { 196, 0x00 },
{ 195, 0x87 }, { 196, 0x18 },
{ 195, 0x88 }, { 196, 0x60 },
{ 195, 0x89 }, { 196, 0x44 },
{ 195, 0x8a }, { 196, 0x8b },
{ 195, 0x8b }, { 196, 0x8b },
{ 195, 0x8c }, { 196, 0x8b },
{ 195, 0x8d }, { 196, 0x8b },
{ 195, 0x8e }, { 196, 0x09 },
{ 195, 0x8f }, { 196, 0x09 },
{ 195, 0x90 }, { 196, 0x09 },
{ 195, 0x91 }, { 196, 0x09 },
{ 195, 0x92 }, { 196, 0x11 },
{ 195, 0x93 }, { 196, 0x11 },
{ 195, 0x94 }, { 196, 0x11 },
{ 195, 0x95 }, { 196, 0x11 },
/* PPAD */
{ 47, 0x80 }, { 60, 0x80 }, { 150, 0xd2 }, { 151, 0x32 },
{ 152, 0x23 }, { 153, 0x41 }, { 154, 0x00 }, { 155, 0x4f },
{ 253, 0x7e }, { 195, 0x30 }, { 196, 0x32 }, { 195, 0x31 },
{ 196, 0x23 }, { 195, 0x32 }, { 196, 0x45 }, { 195, 0x35 },
{ 196, 0x4a }, { 195, 0x36 }, { 196, 0x5a }, { 195, 0x37 },
{ 196, 0x5a },
};
static const struct mt76_reg_pair mac_common_vals[] = {
{ MT_LEGACY_BASIC_RATE, 0x0000013f },
{ MT_HT_BASIC_RATE, 0x00008003 },
{ MT_MAC_SYS_CTRL, 0x00000000 },
{ MT_RX_FILTR_CFG, 0x00017f97 },
{ MT_BKOFF_SLOT_CFG, 0x00000209 },
{ MT_TX_SW_CFG0, 0x00000000 },
{ MT_TX_SW_CFG1, 0x00080606 },
{ MT_TX_LINK_CFG, 0x00001020 },
{ MT_TX_TIMEOUT_CFG, 0x000a2090 },
{ MT_MAX_LEN_CFG, 0x00003fff },
{ MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
{ MT_PBF_RX_MAX_PCNT, 0x0000009f },
{ MT_TX_RETRY_CFG, 0x47d01f0f },
{ MT_AUTO_RSP_CFG, 0x00000013 },
{ MT_CCK_PROT_CFG, 0x05740003 },
{ MT_OFDM_PROT_CFG, 0x05740003 },
{ MT_MM40_PROT_CFG, 0x03f44084 },
{ MT_GF20_PROT_CFG, 0x01744004 },
{ MT_GF40_PROT_CFG, 0x03f44084 },
{ MT_MM20_PROT_CFG, 0x01744004 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
{ MT_TX_RTS_CFG, 0x01092b20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
{ MT_TXOP_HLDR_ET, 0x00000002 },
{ MT_XIFS_TIME_CFG, 0x33a41010 },
{ MT_PWR_PIN_CFG, 0x00000000 },
};
static const struct mt76_reg_pair mac_chip_vals[] = {
{ MT_TSO_CTRL, 0x00006050 },
{ MT_BCN_OFFSET(0), 0x18100800 },
{ MT_BCN_OFFSET(1), 0x38302820 },
{ MT_PBF_SYS_CTRL, 0x00080c00 },
{ MT_PBF_CFG, 0x7f723c1f },
{ MT_FCE_PSE_CTRL, 0x00000001 },
{ MT_PAUSE_ENABLE_CONTROL1, 0x00000000 },
{ MT_TX0_RF_GAIN_CORR, 0x003b0005 },
{ MT_TX0_RF_GAIN_ATTEN, 0x00006900 },
{ MT_TX0_BB_GAIN_ATTEN, 0x00000400 },
{ MT_TX_ALC_VGA3, 0x00060006 },
{ MT_TX_SW_CFG0, 0x00000402 },
{ MT_TX_SW_CFG1, 0x00000000 },
{ MT_TX_SW_CFG2, 0x00000000 },
{ MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
{ MT_FCE_CSO, 0x0000030f },
{ MT_FCE_PARAMETERS, 0x00256f0f },
};
#endif


@@ -0,0 +1,291 @@
/*
* (c) Copyright 2002-2010, Ralink Technology, Inc.
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT7601U_PHY_INITVALS_H
#define __MT7601U_PHY_INITVALS_H
#define RF_REG_PAIR(bank, reg, value) \
{ MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value }
static const struct mt76_reg_pair rf_central[] = {
/* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
RF_REG_PAIR(0, 0, 0x02),
RF_REG_PAIR(0, 1, 0x01),
RF_REG_PAIR(0, 2, 0x11),
RF_REG_PAIR(0, 3, 0xff),
RF_REG_PAIR(0, 4, 0x0a),
RF_REG_PAIR(0, 5, 0x20),
RF_REG_PAIR(0, 6, 0x00),
/* B/G */
RF_REG_PAIR(0, 7, 0x00),
RF_REG_PAIR(0, 8, 0x00),
RF_REG_PAIR(0, 9, 0x00),
RF_REG_PAIR(0, 10, 0x00),
RF_REG_PAIR(0, 11, 0x21),
/* XO */
RF_REG_PAIR(0, 13, 0x00), /* 40mhz xtal */
/* RF_REG_PAIR(0, 13, 0x13), */ /* 20mhz xtal */
RF_REG_PAIR(0, 14, 0x7c),
RF_REG_PAIR(0, 15, 0x22),
RF_REG_PAIR(0, 16, 0x80),
/* PLL */
RF_REG_PAIR(0, 17, 0x99),
RF_REG_PAIR(0, 18, 0x99),
RF_REG_PAIR(0, 19, 0x09),
RF_REG_PAIR(0, 20, 0x50),
RF_REG_PAIR(0, 21, 0xb0),
RF_REG_PAIR(0, 22, 0x00),
RF_REG_PAIR(0, 23, 0xc5),
RF_REG_PAIR(0, 24, 0xfc),
RF_REG_PAIR(0, 25, 0x40),
RF_REG_PAIR(0, 26, 0x4d),
RF_REG_PAIR(0, 27, 0x02),
RF_REG_PAIR(0, 28, 0x72),
RF_REG_PAIR(0, 29, 0x01),
RF_REG_PAIR(0, 30, 0x00),
RF_REG_PAIR(0, 31, 0x00),
/* test ports */
RF_REG_PAIR(0, 32, 0x00),
RF_REG_PAIR(0, 33, 0x00),
RF_REG_PAIR(0, 34, 0x23),
RF_REG_PAIR(0, 35, 0x01), /* change setting to reduce spurs */
RF_REG_PAIR(0, 36, 0x00),
RF_REG_PAIR(0, 37, 0x00),
/* ADC/DAC */
RF_REG_PAIR(0, 38, 0x00),
RF_REG_PAIR(0, 39, 0x20),
RF_REG_PAIR(0, 40, 0x00),
RF_REG_PAIR(0, 41, 0xd0),
RF_REG_PAIR(0, 42, 0x1b),
RF_REG_PAIR(0, 43, 0x02),
RF_REG_PAIR(0, 44, 0x00),
};
static const struct mt76_reg_pair rf_channel[] = {
RF_REG_PAIR(4, 0, 0x01),
RF_REG_PAIR(4, 1, 0x00),
RF_REG_PAIR(4, 2, 0x00),
RF_REG_PAIR(4, 3, 0x00),
/* LDO */
RF_REG_PAIR(4, 4, 0x00),
RF_REG_PAIR(4, 5, 0x08),
RF_REG_PAIR(4, 6, 0x00),
/* RX */
RF_REG_PAIR(4, 7, 0x5b),
RF_REG_PAIR(4, 8, 0x52),
RF_REG_PAIR(4, 9, 0xb6),
RF_REG_PAIR(4, 10, 0x57),
RF_REG_PAIR(4, 11, 0x33),
RF_REG_PAIR(4, 12, 0x22),
RF_REG_PAIR(4, 13, 0x3d),
RF_REG_PAIR(4, 14, 0x3e),
RF_REG_PAIR(4, 15, 0x13),
RF_REG_PAIR(4, 16, 0x22),
RF_REG_PAIR(4, 17, 0x23),
RF_REG_PAIR(4, 18, 0x02),
RF_REG_PAIR(4, 19, 0xa4),
RF_REG_PAIR(4, 20, 0x01),
RF_REG_PAIR(4, 21, 0x12),
RF_REG_PAIR(4, 22, 0x80),
RF_REG_PAIR(4, 23, 0xb3),
RF_REG_PAIR(4, 24, 0x00), /* reserved */
RF_REG_PAIR(4, 25, 0x00), /* reserved */
RF_REG_PAIR(4, 26, 0x00), /* reserved */
RF_REG_PAIR(4, 27, 0x00), /* reserved */
/* LOGEN */
RF_REG_PAIR(4, 28, 0x18),
RF_REG_PAIR(4, 29, 0xee),
RF_REG_PAIR(4, 30, 0x6b),
RF_REG_PAIR(4, 31, 0x31),
RF_REG_PAIR(4, 32, 0x5d),
RF_REG_PAIR(4, 33, 0x00), /* reserved */
/* TX */
RF_REG_PAIR(4, 34, 0x96),
RF_REG_PAIR(4, 35, 0x55),
RF_REG_PAIR(4, 36, 0x08),
RF_REG_PAIR(4, 37, 0xbb),
RF_REG_PAIR(4, 38, 0xb3),
RF_REG_PAIR(4, 39, 0xb3),
RF_REG_PAIR(4, 40, 0x03),
RF_REG_PAIR(4, 41, 0x00), /* reserved */
RF_REG_PAIR(4, 42, 0x00), /* reserved */
RF_REG_PAIR(4, 43, 0xc5),
RF_REG_PAIR(4, 44, 0xc5),
RF_REG_PAIR(4, 45, 0xc5),
RF_REG_PAIR(4, 46, 0x07),
RF_REG_PAIR(4, 47, 0xa8),
RF_REG_PAIR(4, 48, 0xef),
RF_REG_PAIR(4, 49, 0x1a),
/* PA */
RF_REG_PAIR(4, 54, 0x07),
RF_REG_PAIR(4, 55, 0xa7),
RF_REG_PAIR(4, 56, 0xcc),
RF_REG_PAIR(4, 57, 0x14),
RF_REG_PAIR(4, 58, 0x07),
RF_REG_PAIR(4, 59, 0xa8),
RF_REG_PAIR(4, 60, 0xd7),
RF_REG_PAIR(4, 61, 0x10),
RF_REG_PAIR(4, 62, 0x1c),
RF_REG_PAIR(4, 63, 0x00), /* reserved */
};
static const struct mt76_reg_pair rf_vga[] = {
RF_REG_PAIR(5, 0, 0x47),
RF_REG_PAIR(5, 1, 0x00),
RF_REG_PAIR(5, 2, 0x00),
RF_REG_PAIR(5, 3, 0x08),
RF_REG_PAIR(5, 4, 0x04),
RF_REG_PAIR(5, 5, 0x20),
RF_REG_PAIR(5, 6, 0x3a),
RF_REG_PAIR(5, 7, 0x3a),
RF_REG_PAIR(5, 8, 0x00),
RF_REG_PAIR(5, 9, 0x00),
RF_REG_PAIR(5, 10, 0x10),
RF_REG_PAIR(5, 11, 0x10),
RF_REG_PAIR(5, 12, 0x10),
RF_REG_PAIR(5, 13, 0x10),
RF_REG_PAIR(5, 14, 0x10),
RF_REG_PAIR(5, 15, 0x20),
RF_REG_PAIR(5, 16, 0x22),
RF_REG_PAIR(5, 17, 0x7c),
RF_REG_PAIR(5, 18, 0x00),
RF_REG_PAIR(5, 19, 0x00),
RF_REG_PAIR(5, 20, 0x00),
RF_REG_PAIR(5, 21, 0xf1),
RF_REG_PAIR(5, 22, 0x11),
RF_REG_PAIR(5, 23, 0x02),
RF_REG_PAIR(5, 24, 0x41),
RF_REG_PAIR(5, 25, 0x20),
RF_REG_PAIR(5, 26, 0x00),
RF_REG_PAIR(5, 27, 0xd7),
RF_REG_PAIR(5, 28, 0xa2),
RF_REG_PAIR(5, 29, 0x20),
RF_REG_PAIR(5, 30, 0x49),
RF_REG_PAIR(5, 31, 0x20),
RF_REG_PAIR(5, 32, 0x04),
RF_REG_PAIR(5, 33, 0xf1),
RF_REG_PAIR(5, 34, 0xa1),
RF_REG_PAIR(5, 35, 0x01),
RF_REG_PAIR(5, 41, 0x00),
RF_REG_PAIR(5, 42, 0x00),
RF_REG_PAIR(5, 43, 0x00),
RF_REG_PAIR(5, 44, 0x00),
RF_REG_PAIR(5, 45, 0x00),
RF_REG_PAIR(5, 46, 0x00),
RF_REG_PAIR(5, 47, 0x00),
RF_REG_PAIR(5, 48, 0x00),
RF_REG_PAIR(5, 49, 0x00),
RF_REG_PAIR(5, 50, 0x00),
RF_REG_PAIR(5, 51, 0x00),
RF_REG_PAIR(5, 52, 0x00),
RF_REG_PAIR(5, 53, 0x00),
RF_REG_PAIR(5, 54, 0x00),
RF_REG_PAIR(5, 55, 0x00),
RF_REG_PAIR(5, 56, 0x00),
RF_REG_PAIR(5, 57, 0x00),
RF_REG_PAIR(5, 58, 0x31),
RF_REG_PAIR(5, 59, 0x31),
RF_REG_PAIR(5, 60, 0x0a),
RF_REG_PAIR(5, 61, 0x02),
RF_REG_PAIR(5, 62, 0x00),
RF_REG_PAIR(5, 63, 0x00),
};
/* TODO: BBP 178 is set to 0xff for "CCK CH14 OBW", which overrides the
* settings applied on channel switching. Seems stupid at best.
*/
static const struct mt76_reg_pair bbp_high_temp[] = {
{ 75, 0x60 },
{ 92, 0x02 },
{ 178, 0xff }, /* For CCK CH14 OBW */
{ 195, 0x88 }, { 196, 0x60 },
}, bbp_high_temp_bw20[] = {
{ 69, 0x12 },
{ 91, 0x07 },
{ 195, 0x23 }, { 196, 0x17 },
{ 195, 0x24 }, { 196, 0x06 },
{ 195, 0x81 }, { 196, 0x12 },
{ 195, 0x83 }, { 196, 0x17 },
}, bbp_high_temp_bw40[] = {
{ 69, 0x15 },
{ 91, 0x04 },
{ 195, 0x23 }, { 196, 0x12 },
{ 195, 0x24 }, { 196, 0x08 },
{ 195, 0x81 }, { 196, 0x15 },
{ 195, 0x83 }, { 196, 0x16 },
}, bbp_low_temp[] = {
{ 178, 0xff }, /* For CCK CH14 OBW */
}, bbp_low_temp_bw20[] = {
{ 69, 0x12 },
{ 75, 0x5e },
{ 91, 0x07 },
{ 92, 0x02 },
{ 195, 0x23 }, { 196, 0x17 },
{ 195, 0x24 }, { 196, 0x06 },
{ 195, 0x81 }, { 196, 0x12 },
{ 195, 0x83 }, { 196, 0x17 },
{ 195, 0x88 }, { 196, 0x5e },
}, bbp_low_temp_bw40[] = {
{ 69, 0x15 },
{ 75, 0x5c },
{ 91, 0x04 },
{ 92, 0x03 },
{ 195, 0x23 }, { 196, 0x10 },
{ 195, 0x24 }, { 196, 0x08 },
{ 195, 0x81 }, { 196, 0x15 },
{ 195, 0x83 }, { 196, 0x16 },
{ 195, 0x88 }, { 196, 0x5b },
}, bbp_normal_temp[] = {
{ 75, 0x60 },
{ 92, 0x02 },
{ 178, 0xff }, /* For CCK CH14 OBW */
{ 195, 0x88 }, { 196, 0x60 },
}, bbp_normal_temp_bw20[] = {
{ 69, 0x12 },
{ 91, 0x07 },
{ 195, 0x23 }, { 196, 0x17 },
{ 195, 0x24 }, { 196, 0x06 },
{ 195, 0x81 }, { 196, 0x12 },
{ 195, 0x83 }, { 196, 0x17 },
}, bbp_normal_temp_bw40[] = {
{ 69, 0x15 },
{ 91, 0x04 },
{ 195, 0x23 }, { 196, 0x12 },
{ 195, 0x24 }, { 196, 0x08 },
{ 195, 0x81 }, { 196, 0x15 },
{ 195, 0x83 }, { 196, 0x16 },
};
#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
static const struct reg_table {
const struct mt76_reg_pair *regs;
size_t n;
} bbp_mode_table[3][3] = {
{
BBP_TABLE(bbp_normal_temp_bw20),
BBP_TABLE(bbp_normal_temp_bw40),
BBP_TABLE(bbp_normal_temp),
}, {
BBP_TABLE(bbp_high_temp_bw20),
BBP_TABLE(bbp_high_temp_bw40),
BBP_TABLE(bbp_high_temp),
}, {
BBP_TABLE(bbp_low_temp_bw20),
BBP_TABLE(bbp_low_temp_bw40),
BBP_TABLE(bbp_low_temp),
}
};
#endif
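
A note on bbp_mode_table: rows follow enum mt_temp_mode (normal, high, low) and the first two columns follow enum mt_bw (20, 40 MHz), with the third column holding the bandwidth-independent values for that temperature mode. A sketch of the lookup, with example_apply_bbp_mode as a hypothetical helper name:

static int example_apply_bbp_mode(struct mt7601u_dev *dev)
{
        const struct reg_table *t = &bbp_mode_table[dev->temp_mode][dev->bw];
        int ret;

        ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
                                      t->regs, t->n);
        if (ret)
                return ret;

        /* Column 2: values common to both bandwidths. */
        t = &bbp_mode_table[dev->temp_mode][2];
        return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
                                       t->regs, t->n);
}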


@@ -0,0 +1,569 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mt7601u.h"
#include "trace.h"
#include <linux/etherdevice.h>
static void
mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
{
u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
txrate->idx = 0;
txrate->flags = 0;
txrate->count = 1;
switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
case MT_PHY_TYPE_OFDM:
txrate->idx = idx + 4;
return;
case MT_PHY_TYPE_CCK:
if (idx >= 8)
idx -= 8;
txrate->idx = idx;
return;
case MT_PHY_TYPE_HT_GF:
txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
/* fall through */
case MT_PHY_TYPE_HT:
txrate->flags |= IEEE80211_TX_RC_MCS;
txrate->idx = idx;
break;
default:
WARN_ON(1);
return;
}
if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (rate & MT_TXWI_RATE_SGI)
txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
}
static void
mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
struct mt76_tx_status *st)
{
struct ieee80211_tx_rate *rate = info->status.rates;
int cur_idx, last_rate;
int i;
last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
if (last_rate < IEEE80211_TX_MAX_RATES - 1)
rate[last_rate + 1].idx = -1;
cur_idx = rate[last_rate].idx + st->retry;
for (i = 0; i <= last_rate; i++) {
rate[i].flags = rate[last_rate].flags;
rate[i].idx = max_t(int, 0, cur_idx - i);
rate[i].count = 1;
}
if (last_rate > 0)
rate[last_rate - 1].count = st->retry + 1 - last_rate;
info->status.ampdu_len = 1;
info->status.ampdu_ack_len = st->success;
if (st->is_probe)
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
if (st->aggr)
info->flags |= IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_STAT_AMPDU;
if (!st->ack_req)
info->flags |= IEEE80211_TX_CTL_NO_ACK;
else if (st->success)
info->flags |= IEEE80211_TX_STAT_ACK;
}
u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
u16 rateval;
u8 phy, rate_idx;
u8 nss = 1;
u8 bw = 0;
if (rate->flags & IEEE80211_TX_RC_MCS) {
rate_idx = rate->idx;
nss = 1 + (rate->idx >> 3);
phy = MT_PHY_TYPE_HT;
if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
phy = MT_PHY_TYPE_HT_GF;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
bw = 1;
} else {
const struct ieee80211_rate *r;
int band = dev->chandef.chan->band;
u16 val;
r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
val = r->hw_value;
phy = val >> 8;
rate_idx = val & 0xff;
bw = 0;
}
rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
rateval |= MT_RXWI_RATE_SGI;
*nss_val = nss;
return rateval;
}
void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
{
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
wcid->tx_rate_set = true;
spin_unlock_irqrestore(&dev->lock, flags);
}
struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
{
struct mt76_tx_status stat = {};
u32 val;
val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
return stat;
}
void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
struct mt76_wcid *wcid = NULL;
void *msta;
rcu_read_lock();
if (stat->wcid < ARRAY_SIZE(dev->wcid))
wcid = rcu_dereference(dev->wcid[stat->wcid]);
if (wcid) {
msta = container_of(wcid, struct mt76_sta, wcid);
sta = container_of(msta, struct ieee80211_sta,
drv_priv);
}
mt76_mac_fill_tx_status(dev, &info, stat);
ieee80211_tx_status_noskb(dev->hw, sta, &info);
rcu_read_unlock();
}
void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
int ht_mode)
{
int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
u32 prot[6];
bool ht_rts[4] = {};
int i;
prot[0] = MT_PROT_NAV_SHORT |
MT_PROT_TXOP_ALLOW_ALL |
MT_PROT_RTS_THR_EN;
prot[1] = prot[0];
if (legacy_prot)
prot[1] |= MT_PROT_CTRL_CTS2SELF;
prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
if (legacy_prot) {
prot[2] |= MT_PROT_RATE_CCK_11;
prot[3] |= MT_PROT_RATE_CCK_11;
prot[4] |= MT_PROT_RATE_CCK_11;
prot[5] |= MT_PROT_RATE_CCK_11;
} else {
prot[2] |= MT_PROT_RATE_OFDM_24;
prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
prot[4] |= MT_PROT_RATE_OFDM_24;
prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
}
switch (mode) {
case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
ht_rts[1] = ht_rts[3] = true;
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
break;
}
if (non_gf)
ht_rts[2] = ht_rts[3] = true;
for (i = 0; i < 4; i++)
if (ht_rts[i])
prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
for (i = 0; i < 6; i++)
mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
}
void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
{
if (short_preamb)
mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
else
mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
{
u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN);
if (!enable) {
mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
return;
}
val &= ~MT_BEACON_TIME_CFG_INTVAL;
val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN;
mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
}
static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
{
u32 val = mt7601u_rr(dev, 0x10f4);
if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
return;
dev_err(dev->dev, "Error: MAC specific condition occurred\n");
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
}
void mt7601u_mac_work(struct work_struct *work)
{
struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
mac_work.work);
struct {
u32 addr_base;
u32 span;
u64 *stat_base;
} spans[] = {
{ MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
{ MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
{ MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
{ MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
{ MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
{ MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
};
u32 sum, n;
int i, j, k;
/* Note: using MCU_RANDOM_READ is actually slower than reading all the
* registers by hand. The MCU takes ca. 20ms to complete a read of 24
* registers, while reading them one by one takes roughly
* 24 * 200us =~ 5ms.
*/
k = 0;
n = 0;
sum = 0;
for (i = 0; i < ARRAY_SIZE(spans); i++)
for (j = 0; j < spans[i].span; j++) {
u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
spans[i].stat_base[j * 2] += val & 0xffff;
spans[i].stat_base[j * 2 + 1] += val >> 16;
/* Calculate average AMPDU length */
if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
continue;
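/* Each aggregation-count register packs two 16-bit buckets: the low
 * half counts AMPDUs of length 2 * k + 1, the high half of length
 * 2 * k + 2; hence the weights below.
 */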
n += (val >> 16) + (val & 0xffff);
sum += (val & 0xffff) * (1 + k * 2) +
(val >> 16) * (2 + k * 2);
k++;
}
atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
mt7601u_check_mac_err(dev);
ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
}
void
mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
{
u8 zmac[ETH_ALEN] = {};
u32 attr;
attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
mt76_wr(dev, MT_WCID_ATTR(idx), attr);
if (mac)
memcpy(zmac, mac, sizeof(zmac));
mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
}
void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
{
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
void *msta;
u8 min_factor = 3;
int i;
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
wcid = rcu_dereference(dev->wcid[i]);
if (!wcid)
continue;
msta = container_of(wcid, struct mt76_sta, wcid);
sta = container_of(msta, struct ieee80211_sta, drv_priv);
min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
}
rcu_read_unlock();
mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
}
static void
mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
{
u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
case MT_PHY_TYPE_OFDM:
if (WARN_ON(idx >= 8))
idx = 0;
idx += 4;
status->rate_idx = idx;
return;
case MT_PHY_TYPE_CCK:
if (idx >= 8) {
idx -= 8;
status->flag |= RX_FLAG_SHORTPRE;
}
if (WARN_ON(idx >= 4))
idx = 0;
status->rate_idx = idx;
return;
case MT_PHY_TYPE_HT_GF:
status->flag |= RX_FLAG_HT_GF;
/* fall through */
case MT_PHY_TYPE_HT:
status->flag |= RX_FLAG_HT;
status->rate_idx = idx;
break;
default:
WARN_ON(1);
return;
}
if (rate & MT_RXWI_RATE_SGI)
status->flag |= RX_FLAG_SHORT_GI;
if (rate & MT_RXWI_RATE_STBC)
status->flag |= 1 << RX_FLAG_STBC_SHIFT;
if (rate & MT_RXWI_RATE_BW)
status->flag |= RX_FLAG_40MHZ;
}
static void
mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
u16 rate, int rssi)
{
dev->bcn_freq_off = rxwi->freq_off;
dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
}
static int
mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
return ieee80211_is_beacon(hdr->frame_control) &&
ether_addr_equal(hdr->addr2, dev->ap_bssid);
}
u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
u8 *data, void *rxi)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt7601u_rxwi *rxwi = rxi;
u32 ctl = le32_to_cpu(rxwi->ctl);
u16 rate = le16_to_cpu(rxwi->rate);
int rssi;
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
status->flag |= RX_FLAG_DECRYPTED;
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
}
status->chains = BIT(0);
rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
status->chain_signal[0] = status->signal = rssi;
status->freq = dev->chandef.chan->center_freq;
status->band = dev->chandef.chan->band;
mt76_mac_process_rate(status, rate);
spin_lock_bh(&dev->con_mon_lock);
if (mt7601u_rx_is_our_beacon(dev, data))
mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
spin_unlock_bh(&dev->con_mon_lock);
return MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
}
static enum mt76_cipher_type
mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
if (!key)
return MT_CIPHER_NONE;
if (key->keylen > 32)
return MT_CIPHER_NONE;
memcpy(key_data, key->key, key->keylen);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MT_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MT_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MT_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_CCMP:
return MT_CIPHER_AES_CCMP;
default:
return MT_CIPHER_NONE;
}
}
int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
u32 val;
cipher = mt76_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
return -EINVAL;
trace_set_key(dev, idx);
mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
memset(iv_data, 0, sizeof(iv_data));
if (key) {
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP) {
/* Note: start with 1 to comply with spec,
* (see comment on common/cmm_wpa.c:4291).
*/
iv_data[0] |= 1;
iv_data[3] |= 0x20;
}
}
mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
val &= ~MT_WCID_ATTR_PAIRWISE;
val |= MT_WCID_ATTR_PAIRWISE *
!!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
return 0;
}
int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
struct ieee80211_key_conf *key)
{
enum mt76_cipher_type cipher;
u8 key_data[32];
u32 val;
cipher = mt76_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
return -EINVAL;
trace_set_shared_key(dev, vif_idx, key_idx);
mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
key_data, sizeof(key_data));
val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
return 0;
}


@@ -0,0 +1,178 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT76_MAC_H
#define __MT76_MAC_H
struct mt76_tx_status {
u8 valid:1;
u8 success:1;
u8 aggr:1;
u8 ack_req:1;
u8 is_probe:1;
u8 wcid;
u8 pktid;
u8 retry;
u16 rate;
} __packed __aligned(2);
/* Note: the values in the original "RSSI" and "SNR" fields are not actually
* what those names suggest on MT7601U; the names used by this driver are
* educated guesses (see vendor mac/ral_omac.c).
*/
struct mt7601u_rxwi {
__le32 rxinfo;
__le32 ctl;
__le16 frag_sn;
__le16 rate;
u8 unknown;
u8 zero[3];
u8 snr;
u8 ant;
u8 gain;
u8 freq_off;
__le32 resv2;
__le32 expert_ant;
} __packed __aligned(4);
#define MT_RXINFO_BA BIT(0)
#define MT_RXINFO_DATA BIT(1)
#define MT_RXINFO_NULL BIT(2)
#define MT_RXINFO_FRAG BIT(3)
#define MT_RXINFO_U2M BIT(4)
#define MT_RXINFO_MULTICAST BIT(5)
#define MT_RXINFO_BROADCAST BIT(6)
#define MT_RXINFO_MYBSS BIT(7)
#define MT_RXINFO_CRCERR BIT(8)
#define MT_RXINFO_ICVERR BIT(9)
#define MT_RXINFO_MICERR BIT(10)
#define MT_RXINFO_AMSDU BIT(11)
#define MT_RXINFO_HTC BIT(12)
#define MT_RXINFO_RSSI BIT(13)
#define MT_RXINFO_L2PAD BIT(14)
#define MT_RXINFO_AMPDU BIT(15)
#define MT_RXINFO_DECRYPT BIT(16)
#define MT_RXINFO_BSSIDX3 BIT(17)
#define MT_RXINFO_WAPI_KEY BIT(18)
#define MT_RXINFO_PN_LEN GENMASK(21, 19)
#define MT_RXINFO_SW_PKT_80211 BIT(22)
#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
#define MT_RXINFO_TCP_SUM_ERR BIT(30)
#define MT_RXINFO_IP_SUM_ERR BIT(31)
#define MT_RXWI_CTL_WCID GENMASK(7, 0)
#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
#define MT_RXWI_CTL_UDF GENMASK(15, 13)
#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
#define MT_RXWI_CTL_TID GENMASK(31, 28)
#define MT_RXWI_FRAG GENMASK(3, 0)
#define MT_RXWI_SN GENMASK(15, 4)
#define MT_RXWI_RATE_MCS GENMASK(6, 0)
#define MT_RXWI_RATE_BW BIT(7)
#define MT_RXWI_RATE_SGI BIT(8)
#define MT_RXWI_RATE_STBC GENMASK(10, 9)
#define MT_RXWI_RATE_ETXBF BIT(11)
#define MT_RXWI_RATE_SND BIT(12)
#define MT_RXWI_RATE_ITXBF BIT(13)
#define MT_RXWI_RATE_PHY GENMASK(15, 14)
#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
#define MT_RXWI_ANT_AUX_LNA BIT(7)
#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
enum mt76_phy_type {
MT_PHY_TYPE_CCK,
MT_PHY_TYPE_OFDM,
MT_PHY_TYPE_HT,
MT_PHY_TYPE_HT_GF,
};
enum mt76_phy_bandwidth {
MT_PHY_BW_20,
MT_PHY_BW_40,
};
struct mt76_txwi {
__le16 flags;
__le16 rate_ctl;
u8 ack_ctl;
u8 wcid;
__le16 len_ctl;
__le32 iv;
__le32 eiv;
u8 aid;
u8 txstream;
__le16 ctl;
} __packed __aligned(4);
#define MT_TXWI_FLAGS_FRAG BIT(0)
#define MT_TXWI_FLAGS_MMPS BIT(1)
#define MT_TXWI_FLAGS_CFACK BIT(2)
#define MT_TXWI_FLAGS_TS BIT(3)
#define MT_TXWI_FLAGS_AMPDU BIT(4)
#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
#define MT_TXWI_FLAGS_TX_RPT BIT(14)
#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
#define MT_TXWI_RATE_MCS GENMASK(6, 0)
#define MT_TXWI_RATE_BW BIT(7)
#define MT_TXWI_RATE_SGI BIT(8)
#define MT_TXWI_RATE_STBC GENMASK(10, 9)
#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
#define MT_TXWI_ACK_CTL_REQ BIT(0)
#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
#define MT_TXWI_LEN_PKTID GENMASK(15, 12)
#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
#define MT_TXWI_CTL_PIFS_REV BIT(6)
u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
u8 *data, void *rxi);
int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate);
int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
struct ieee80211_key_conf *key);
u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val);
struct mt76_tx_status
mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
#endif
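
As a reading aid: the GENMASK()/BIT() fields above are accessed through the driver's MT76_GET()/MT76_SET() bitfield helpers (defined in util.h, which this hunk does not show). A hypothetical decode of an RXWI rate word:

static inline void example_decode_rxwi_rate(u16 rate)
{
        u8 mcs = MT76_GET(MT_RXWI_RATE_MCS, rate);
        u8 phy = MT76_GET(MT_RXWI_RATE_PHY, rate); /* enum mt76_phy_type */
        bool sgi = rate & MT_RXWI_RATE_SGI;
        bool bw40 = rate & MT_RXWI_RATE_BW;

        pr_debug("rate: mcs=%u phy=%u sgi=%d bw40=%d\n", mcs, phy, sgi, bw40);
}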


@@ -0,0 +1,412 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mt7601u.h"
#include "mac.h"
#include <linux/etherdevice.h>
#include <linux/version.h>
static int mt7601u_start(struct ieee80211_hw *hw)
{
struct mt7601u_dev *dev = hw->priv;
int ret;
mutex_lock(&dev->mutex);
ret = mt7601u_mac_start(dev);
if (ret)
goto out;
ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
out:
mutex_unlock(&dev->mutex);
return ret;
}
static void mt7601u_stop(struct ieee80211_hw *hw)
{
struct mt7601u_dev *dev = hw->priv;
mutex_lock(&dev->mutex);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
mt7601u_mac_stop(dev);
mutex_unlock(&dev->mutex);
}
static int mt7601u_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
unsigned int idx = 0;
unsigned int wcid = GROUP_WCID(idx);
/* Note: to support AP mode, do the AP-STA things mt76 does:
* - beacon offsets
* - do mac address tricks
* - shift vif idx
*/
mvif->idx = idx;
if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
return -ENOSPC;
dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
mvif->group_wcid.idx = wcid;
mvif->group_wcid.hw_key_idx = -1;
return 0;
}
static void mt7601u_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
unsigned int wcid = mvif->group_wcid.idx;
dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
}
static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7601u_dev *dev = hw->priv;
int ret = 0;
mutex_lock(&dev->mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
mutex_unlock(&dev->mutex);
return ret;
}
static void
mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
struct mt7601u_dev *dev = hw->priv;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
dev->rxfilter &= ~(_hw); \
dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mutex_lock(&dev->mutex);
dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
MT_RX_FILTR_CFG_CTS |
MT_RX_FILTR_CFG_CFEND |
MT_RX_FILTR_CFG_CFACK |
MT_RX_FILTR_CFG_BA |
MT_RX_FILTR_CFG_CTRL_RSV);
MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
*total_flags = flags;
mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
mutex_unlock(&dev->mutex);
}
static void
mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt7601u_dev *dev = hw->priv;
mutex_lock(&dev->mutex);
if (changed & BSS_CHANGED_ASSOC)
mt7601u_phy_con_cal_onoff(dev, info);
if (changed & BSS_CHANGED_BSSID) {
mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
/* Note: this is a hack because beacon_int is not updated when leaving
* the BSS, nor is any more appropriate event generated.
* rt2x00 doesn't seem to be bothered by this though.
*/
if (is_zero_ether_addr(info->bssid))
mt7601u_mac_config_tsf(dev, false, 0);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
}
if (changed & BSS_CHANGED_BEACON_INT)
mt7601u_mac_config_tsf(dev, true, info->beacon_int);
if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
mt7601u_mac_set_protection(dev, info->use_cts_prot,
info->ht_operation_mode);
if (changed & BSS_CHANGED_ERP_PREAMBLE)
mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
}
if (changed & BSS_CHANGED_ASSOC)
mt7601u_phy_recalibrate_after_assoc(dev);
mutex_unlock(&dev->mutex);
}
static int
mt76_wcid_alloc(struct mt7601u_dev *dev)
{
int i, idx = 0;
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
idx = ffs(~dev->wcid_mask[i]);
if (!idx)
continue;
idx--;
dev->wcid_mask[i] |= BIT(idx);
break;
}
idx = i * BITS_PER_LONG + idx;
if (idx > 119)
return -1;
return idx;
}
static int
mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
int ret = 0;
int idx = 0;
mutex_lock(&dev->mutex);
idx = mt76_wcid_alloc(dev);
if (idx < 0) {
ret = -ENOSPC;
goto out;
}
msta->wcid.idx = idx;
msta->wcid.hw_key_idx = -1;
mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
mt7601u_mac_set_ampdu_factor(dev);
out:
mutex_unlock(&dev->mutex);
return ret;
}
static int
mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
int idx = msta->wcid.idx;
mutex_lock(&dev->mutex);
rcu_assign_pointer(dev->wcid[idx], NULL);
mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
mt7601u_mac_set_ampdu_factor(dev);
mutex_unlock(&dev->mutex);
return 0;
}
static void
mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
{
}
static void
mt7601u_sw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
{
struct mt7601u_dev *dev = hw->priv;
mt7601u_agc_save(dev);
set_bit(MT7601U_STATE_SCANNING, &dev->state);
}
static void
mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7601u_dev *dev = hw->priv;
mt7601u_agc_restore(dev);
clear_bit(MT7601U_STATE_SCANNING, &dev->state);
}
static int
mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
int idx = key->keyidx;
int ret;
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
} else {
if (idx == wcid->hw_key_idx)
wcid->hw_key_idx = -1;
key = NULL;
}
if (!msta) {
if (key || wcid->hw_key_idx == idx) {
ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
if (ret)
return ret;
}
return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
}
return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
}
static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct mt7601u_dev *dev = hw->priv;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
return 0;
}
static int
mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
WARN_ON(msta->wcid.idx > GROUP_WCID(0));
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
BIT(16 + tid));
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
break;
case IEEE80211_AMPDU_TX_START:
msta->agg_ssn[tid] = *ssn << 4;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
return 0;
}
static void
mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7601u_dev *dev = hw->priv;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
struct ieee80211_sta_rates *rates;
struct ieee80211_tx_rate rate = {};
rcu_read_lock();
rates = rcu_dereference(sta->rates);
if (!rates)
goto out;
rate.idx = rates->rate[0].idx;
rate.flags = rates->rate[0].flags;
mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
out:
rcu_read_unlock();
}
const struct ieee80211_ops mt7601u_ops = {
.tx = mt7601u_tx,
.start = mt7601u_start,
.stop = mt7601u_stop,
.add_interface = mt7601u_add_interface,
.remove_interface = mt7601u_remove_interface,
.config = mt7601u_config,
.configure_filter = mt76_configure_filter,
.bss_info_changed = mt7601u_bss_info_changed,
.sta_add = mt7601u_sta_add,
.sta_remove = mt7601u_sta_remove,
.sta_notify = mt7601u_sta_notify,
.set_key = mt7601u_set_key,
.conf_tx = mt7601u_conf_tx,
.sw_scan_start = mt7601u_sw_scan,
.sw_scan_complete = mt7601u_sw_scan_complete,
.ampdu_action = mt76_ampdu_action,
.sta_rate_tbl_update = mt76_sta_rate_tbl_update,
.set_rts_threshold = mt7601u_set_rts_threshold,
};
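
For orientation: a const ieee80211_ops table like this is bound to a hardware instance with mac80211's ieee80211_alloc_hw(), which sizes hw->priv so the callbacks above can cast it back to struct mt7601u_dev. The driver's real probe path lives in usb.c/init.c (not shown in this hunk); a sketch:

static struct ieee80211_hw *example_alloc_hw(void)
{
        /* hw->priv gets sizeof(struct mt7601u_dev) bytes of driver state. */
        return ieee80211_alloc_hw(sizeof(struct mt7601u_dev), &mt7601u_ops);
}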


@@ -0,0 +1,534 @@
/*
* (c) Copyright 2002-2010, Ralink Technology, Inc.
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/skbuff.h>
#include "mt7601u.h"
#include "dma.h"
#include "mcu.h"
#include "usb.h"
#include "trace.h"
#define MCU_FW_URB_MAX_PAYLOAD 0x3800
#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
#define MCU_RESP_URB_SIZE 1024
static inline int firmware_running(struct mt7601u_dev *dev)
{
return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
}
static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
put_unaligned_le32(val, skb_put(skb, 4));
}
static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
u8 seq, enum mcu_cmd cmd)
{
WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
}
static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
struct sk_buff *skb, bool need_resp)
{
u32 i, csum = 0;
for (i = 0; i < skb->len / 4; i++)
csum ^= get_unaligned_le32(skb->data + i * 4);
trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
}
static struct sk_buff *
mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
{
struct sk_buff *skb;
WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
if (!skb)
return NULL;
skb_reserve(skb, MT_DMA_HDR_LEN);
memcpy(skb_put(skb, len), data, len);
return skb;
}
static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
{
struct urb *urb = dev->mcu.resp.urb;
u32 rxfce;
int urb_status, ret, i = 5;
while (i--) {
if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
msecs_to_jiffies(300))) {
dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
continue;
}
/* Make copies of important data before reusing the urb */
rxfce = get_unaligned_le32(dev->mcu.resp.buf);
urb_status = urb->status * mt7601u_urb_has_error(urb);
ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
&dev->mcu.resp, GFP_KERNEL,
mt7601u_complete_urb,
&dev->mcu.resp_cmpl);
if (ret)
return ret;
if (urb_status)
dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
urb_status);
if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
return 0;
dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
}
dev_err(dev->dev, "Error: %s timed out\n", __func__);
return -ETIMEDOUT;
}
static int
mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
enum mcu_cmd cmd, bool wait_resp)
{
struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
dev->out_eps[MT_EP_OUT_INBAND_CMD]);
int sent, ret;
u8 seq = 0;
if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
return 0;
mutex_lock(&dev->mcu.mutex);
if (wait_resp)
while (!seq)
seq = ++dev->mcu.msg_seq & 0xf;
mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
if (dev->mcu.resp_cmpl.done)
dev_err(dev->dev, "Error: MCU response pre-completed!\n");
trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
if (ret) {
dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
goto out;
}
if (sent != skb->len)
dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
if (wait_resp)
ret = mt7601u_mcu_wait_resp(dev, seq);
out:
mutex_unlock(&dev->mcu.mutex);
consume_skb(skb);
return ret;
}
static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
enum mcu_function func, u32 val)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
}
int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
{
int ret;
if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
return 0;
ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
use_hvga);
if (ret) {
dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
return ret;
}
dev->tssi_read_trig = true;
return 0;
}
int
mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(cal),
.value = cpu_to_le32(val),
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
}
int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
struct sk_buff *skb;
int cnt, i, ret;
if (!n)
return 0;
cnt = min(max_vals_per_cmd, n);
skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_reserve(skb, MT_DMA_HDR_LEN);
for (i = 0; i < cnt; i++) {
skb_put_le32(skb, base + data[i].reg);
skb_put_le32(skb, data[i].value);
}
ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
if (ret)
return ret;
return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
}
int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
const u32 *data, int n)
{
const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
struct sk_buff *skb;
int cnt, i, ret;
if (!n)
return 0;
cnt = min(max_regs_per_cmd, n);
skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_reserve(skb, MT_DMA_HDR_LEN);
skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
for (i = 0; i < cnt; i++)
skb_put_le32(skb, data[i]);
ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
if (ret)
return ret;
return mt7601u_burst_write_regs(dev, offset + cnt * 4,
data + cnt, n - cnt);
}
struct mt76_fw_header {
__le32 ilm_len;
__le32 dlm_len;
__le16 build_ver;
__le16 fw_ver;
u8 pad[4];
char build_time[16];
};
struct mt76_fw {
struct mt76_fw_header hdr;
u8 ivb[MT_MCU_IVB_SIZE];
u8 ilm[];
};
static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
const struct mt7601u_dma_buf *dma_buf,
const void *data, u32 len, u32 dst_addr)
{
DECLARE_COMPLETION_ONSTACK(cmpl);
struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
__le32 reg;
u32 val;
int ret;
reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
MT76_SET(MT_TXD_INFO_LEN, len));
memcpy(buf.buf, &reg, sizeof(reg));
memcpy(buf.buf + sizeof(reg), data, len);
memset(buf.buf + sizeof(reg) + len, 0, 8);
ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
MT_FCE_DMA_ADDR, dst_addr);
if (ret)
return ret;
len = roundup(len, 4);
ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
MT_FCE_DMA_LEN, len << 16);
if (ret)
return ret;
buf.len = MT_DMA_HDR_LEN + len + 4;
ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
&buf, GFP_KERNEL,
mt7601u_complete_urb, &cmpl);
if (ret)
return ret;
if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
dev_err(dev->dev, "Error: firmware upload timed out\n");
usb_kill_urb(buf.urb);
return -ETIMEDOUT;
}
if (mt7601u_urb_has_error(buf.urb)) {
dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
buf.urb->status);
return buf.urb->status;
}
val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
val++;
mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
return 0;
}
static int
mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
const void *data, int len, u32 dst_addr)
{
int n, ret;
if (len == 0)
return 0;
n = min(MCU_FW_URB_MAX_PAYLOAD, len);
ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
if (ret)
return ret;
if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
return -ETIMEDOUT;
return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
}
static int
mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
{
struct mt7601u_dma_buf dma_buf;
void *ivb;
u32 ilm_len, dlm_len;
int i, ret;
ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
ret = -ENOMEM;
goto error;
}
ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
ilm_len, sizeof(fw->ivb));
ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
if (ret)
goto error;
dlm_len = le32_to_cpu(fw->hdr.dlm_len);
dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
dlm_len, MT_MCU_DLM_OFFSET);
if (ret)
goto error;
ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
0x12, 0, ivb, sizeof(fw->ivb));
if (ret < 0)
goto error;
ret = 0;
for (i = 100; i && !firmware_running(dev); i--)
msleep(10);
if (!i) {
ret = -ETIMEDOUT;
goto error;
}
dev_dbg(dev->dev, "Firmware running!\n");
error:
kfree(ivb);
mt7601u_usb_free_buf(dev, &dma_buf);
return ret;
}
static int mt7601u_load_firmware(struct mt7601u_dev *dev)
{
const struct firmware *fw;
const struct mt76_fw_header *hdr;
int len, ret;
u32 val;
mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
MT_USB_DMA_CFG_TX_BULK_EN));
if (firmware_running(dev))
return 0;
ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
if (ret)
return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr))
goto err_inv_fw;
hdr = (const struct mt76_fw_header *) fw->data;
if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
goto err_inv_fw;
len = sizeof(*hdr);
len += le32_to_cpu(hdr->ilm_len);
len += le32_to_cpu(hdr->dlm_len);
if (fw->size != len)
goto err_inv_fw;
val = le16_to_cpu(hdr->fw_ver);
dev_info(dev->dev,
"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
le16_to_cpu(hdr->build_ver), hdr->build_time);
len = le32_to_cpu(hdr->ilm_len);
mt7601u_wr(dev, 0x94c, 0);
mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
mt7601u_vendor_reset(dev);
msleep(5);
mt7601u_wr(dev, 0xa44, 0);
mt7601u_wr(dev, 0x230, 0x84210);
mt7601u_wr(dev, 0x400, 0x80c00);
mt7601u_wr(dev, 0x800, 1);
mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
MT_PBF_CFG_TX1Q_EN |
MT_PBF_CFG_TX2Q_EN |
MT_PBF_CFG_TX3Q_EN));
mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
MT_USB_DMA_CFG_TX_BULK_EN));
val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
val &= ~MT_USB_DMA_CFG_TX_CLR;
mt7601u_wr(dev, MT_USB_DMA_CFG, val);
/* FCE tx_fs_base_ptr */
mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
/* FCE tx_fs_max_cnt */
mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
/* FCE pdma enable */
mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
/* FCE skip_fs_en */
mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
release_firmware(fw);
return ret;
err_inv_fw:
dev_err(dev->dev, "Invalid firmware image\n");
release_firmware(fw);
return -ENOENT;
}
int mt7601u_mcu_init(struct mt7601u_dev *dev)
{
int ret;
mutex_init(&dev->mcu.mutex);
ret = mt7601u_load_firmware(dev);
if (ret)
return ret;
set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
return 0;
}
int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
{
int ret;
ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
if (ret)
return ret;
init_completion(&dev->mcu.resp_cmpl);
if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
mt7601u_usb_free_buf(dev, &dev->mcu.resp);
return -ENOMEM;
}
ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
&dev->mcu.resp, GFP_KERNEL,
mt7601u_complete_urb, &dev->mcu.resp_cmpl);
if (ret) {
mt7601u_usb_free_buf(dev, &dev->mcu.resp);
return ret;
}
return 0;
}
void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
{
usb_kill_urb(dev->mcu.resp.urb);
mt7601u_usb_free_buf(dev, &dev->mcu.resp);
}


@@ -0,0 +1,94 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT7601U_MCU_H
#define __MT7601U_MCU_H
struct mt7601u_dev;
/* Register definitions */
#define MT_MCU_RESET_CTL 0x070C
#define MT_MCU_INT_LEVEL 0x0718
#define MT_MCU_COM_REG0 0x0730
#define MT_MCU_COM_REG1 0x0734
#define MT_MCU_COM_REG2 0x0738
#define MT_MCU_COM_REG3 0x073C
#define MT_MCU_IVB_SIZE 0x40
#define MT_MCU_DLM_OFFSET 0x80000
#define MT_MCU_MEMMAP_WLAN 0x00410000
#define MT_MCU_MEMMAP_BBP 0x40000000
#define MT_MCU_MEMMAP_RF 0x80000000
#define INBAND_PACKET_MAX_LEN 192
enum mcu_cmd {
CMD_FUN_SET_OP = 1,
CMD_LOAD_CR = 2,
CMD_INIT_GAIN_OP = 3,
CMD_DYNC_VGA_OP = 6,
CMD_TDLS_CH_SW = 7,
CMD_BURST_WRITE = 8,
CMD_READ_MODIFY_WRITE = 9,
CMD_RANDOM_READ = 10,
CMD_BURST_READ = 11,
CMD_RANDOM_WRITE = 12,
CMD_LED_MODE_OP = 16,
CMD_POWER_SAVING_OP = 20,
CMD_WOW_CONFIG = 21,
CMD_WOW_QUERY = 22,
CMD_WOW_FEATURE = 24,
CMD_CARRIER_DETECT_OP = 28,
CMD_RADOR_DETECT_OP = 29,
CMD_SWITCH_CHANNEL_OP = 30,
CMD_CALIBRATION_OP = 31,
CMD_BEACON_OP = 32,
CMD_ANTENNA_OP = 33,
};
enum mcu_function {
Q_SELECT = 1,
ATOMIC_TSSI_SETTING = 5,
};
enum mcu_power_mode {
RADIO_OFF = 0x30,
RADIO_ON = 0x31,
RADIO_OFF_AUTO_WAKEUP = 0x32,
RADIO_OFF_ADVANCE = 0x33,
RADIO_ON_ADVANCE = 0x34,
};
enum mcu_calibrate {
MCU_CAL_R = 1,
MCU_CAL_DCOC,
MCU_CAL_LC,
MCU_CAL_LOFT,
MCU_CAL_TXIQ,
MCU_CAL_BW,
MCU_CAL_DPD,
MCU_CAL_RXIQ,
MCU_CAL_TXDCOC,
};
int mt7601u_mcu_init(struct mt7601u_dev *dev);
int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev);
void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev);
int
mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val);
int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
#endif
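
Usage note: each enum mcu_calibrate step is sent as its own CMD_CALIBRATION_OP message through mt7601u_mcu_calibrate() declared above. A hypothetical two-step sequence (the values are illustrative, not taken from this patch):

static int example_calibrate(struct mt7601u_dev *dev)
{
        int ret;

        ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
        if (ret)
                return ret;
        return mt7601u_mcu_calibrate(dev, MCU_CAL_DCOC, 0);
}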


@@ -0,0 +1,390 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MT7601U_H
#define MT7601U_H
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/completion.h>
#include <net/mac80211.h>
#include <linux/debugfs.h>
#include "regs.h"
#include "util.h"
#define MT_CALIBRATE_INTERVAL (4 * HZ)
#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
#define MT_BBP_REG_VERSION 0x00
#define MT_USB_AGGR_SIZE_LIMIT 28 /* * 1024B */
#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
#define MT_RX_ORDER 3
#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
struct mt7601u_dma_buf {
struct urb *urb;
void *buf;
dma_addr_t dma;
size_t len;
};
struct mt7601u_mcu {
struct mutex mutex;
u8 msg_seq;
struct mt7601u_dma_buf resp;
struct completion resp_cmpl;
};
struct mt7601u_freq_cal {
struct delayed_work work;
u8 freq;
bool enabled;
bool adjusting;
};
struct mac_stats {
u64 rx_stat[6];
u64 tx_stat[6];
u64 aggr_stat[2];
u64 aggr_n[32];
u64 zero_len_del[2];
};
#define N_RX_ENTRIES 16
struct mt7601u_rx_queue {
struct mt7601u_dev *dev;
struct mt7601u_dma_buf_rx {
struct urb *urb;
struct page *p;
} e[N_RX_ENTRIES];
unsigned int start;
unsigned int end;
unsigned int entries;
unsigned int pending;
};
#define N_TX_ENTRIES 64
struct mt7601u_tx_queue {
struct mt7601u_dev *dev;
struct mt7601u_dma_buf_tx {
struct urb *urb;
struct sk_buff *skb;
} e[N_TX_ENTRIES];
unsigned int start;
unsigned int end;
unsigned int entries;
unsigned int used;
unsigned int fifo_seq;
};
/* WCID allocation:
* 0: mcast wcid
* 1: bssid wcid
* 1...: STAs
* ...7e: group wcids
* 7f: reserved
*/
#define N_WCIDS 128
#define GROUP_WCID(idx) (N_WCIDS - 2 - idx)
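/* e.g. with N_WCIDS == 128, GROUP_WCID(0) == 0x7e and the group entries
 * count down from there, matching the map above.
 */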
struct mt7601u_eeprom_params;
#define MT_EE_TEMPERATURE_SLOPE 39
#define MT_FREQ_OFFSET_INVALID -128
enum mt_temp_mode {
MT_TEMP_MODE_NORMAL,
MT_TEMP_MODE_HIGH,
MT_TEMP_MODE_LOW,
};
enum mt_bw {
MT_BW_20,
MT_BW_40,
};
enum {
MT7601U_STATE_INITIALIZED,
MT7601U_STATE_REMOVED,
MT7601U_STATE_WLAN_RUNNING,
MT7601U_STATE_MCU_RUNNING,
MT7601U_STATE_SCANNING,
MT7601U_STATE_READING_STATS,
MT7601U_STATE_MORE_STATS,
};
/**
* struct mt7601u_dev - adapter structure
* @lock: protects @wcid->tx_rate.
* @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS
* flags in @state.
* @rx_lock: protects @rx_q.
* @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
* @mutex: ensures exclusive access from mac80211 callbacks.
* @vendor_req_mutex: ensures atomicity of vendor requests.
* @reg_atomic_mutex: ensures atomicity of indirect register accesses
* (accesses to RF and BBP).
* @hw_atomic_mutex: ensures exclusive access to HW during critical
* operations (power management, channel switch).
*/
struct mt7601u_dev {
struct ieee80211_hw *hw;
struct device *dev;
unsigned long state;
struct mutex mutex;
unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
struct cfg80211_chan_def chandef;
struct ieee80211_supported_band *sband_2g;
struct mt7601u_mcu mcu;
struct delayed_work cal_work;
struct delayed_work mac_work;
struct workqueue_struct *stat_wq;
struct delayed_work stat_work;
struct mt76_wcid *mon_wcid;
struct mt76_wcid __rcu *wcid[N_WCIDS];
spinlock_t lock;
const u16 *beacon_offsets;
u8 macaddr[ETH_ALEN];
struct mt7601u_eeprom_params *ee;
struct mutex vendor_req_mutex;
struct mutex reg_atomic_mutex;
struct mutex hw_atomic_mutex;
u32 rxfilter;
u32 debugfs_reg;
u8 out_eps[8];
u8 in_eps[8];
u16 out_max_packet;
u16 in_max_packet;
/* TX */
spinlock_t tx_lock;
struct mt7601u_tx_queue *tx_q;
atomic_t avg_ampdu_len;
/* RX */
spinlock_t rx_lock;
struct tasklet_struct rx_tasklet;
struct mt7601u_rx_queue rx_q;
/* Connection monitoring things */
spinlock_t con_mon_lock;
u8 ap_bssid[ETH_ALEN];
s8 bcn_freq_off;
u8 bcn_phy_mode;
int avg_rssi; /* starts at 0 and converges */
u8 agc_save;
struct mt7601u_freq_cal freq_cal;
bool tssi_read_trig;
s8 tssi_init;
s8 tssi_init_hvga;
s16 tssi_init_hvga_offset_db;
int prev_pwr_diff;
enum mt_temp_mode temp_mode;
int curr_temp;
int dpd_temp;
s8 raw_temp;
bool pll_lock_protect;
u8 bw;
bool chan_ext_below;
/* PA mode */
u32 rf_pa_mode[2];
struct mac_stats stats;
};
struct mt7601u_tssi_params {
char tssi0;
int trgt_power;
};
struct mt76_wcid {
u8 idx;
u8 hw_key_idx;
u16 tx_rate;
bool tx_rate_set;
u8 tx_rate_nss;
};
struct mt76_vif {
u8 idx;
struct mt76_wcid group_wcid;
};
struct mt76_sta {
struct mt76_wcid wcid;
u16 agg_ssn[IEEE80211_NUM_TIDS];
};
struct mt76_reg_pair {
u32 reg;
u32 value;
};
struct mt7601u_rxwi;
extern const struct ieee80211_ops mt7601u_ops;
void mt7601u_init_debugfs(struct mt7601u_dev *dev);
u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
const void *data, int len);
int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
/* Compatibility with mt76 */
#define mt76_rmw_field(_dev, _reg, _field, _val) \
mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
{
return mt7601u_rr(dev, offset);
}
static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
{
return mt7601u_wr(dev, offset, val);
}
static inline u32
mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
{
return mt7601u_rmw(dev, offset, mask, val);
}
static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
{
return mt76_rmw(dev, offset, 0, val);
}
static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
{
return mt76_rmw(dev, offset, val, 0);
}
int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
const struct mt76_reg_pair *data, int len);
int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
const u32 *data, int n);
void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
/* Init */
struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
int mt7601u_init_hardware(struct mt7601u_dev *dev);
int mt7601u_register_device(struct mt7601u_dev *dev);
void mt7601u_cleanup(struct mt7601u_dev *dev);
int mt7601u_mac_start(struct mt7601u_dev *dev);
void mt7601u_mac_stop(struct mt7601u_dev *dev);
/* PHY */
int mt7601u_phy_init(struct mt7601u_dev *dev);
int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
void mt7601u_agc_save(struct mt7601u_dev *dev);
void mt7601u_agc_restore(struct mt7601u_dev *dev);
int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
struct cfg80211_chan_def *chandef);
void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
struct mt7601u_rxwi *rxwi, u16 rate);
void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
struct ieee80211_bss_conf *info);
/* MAC */
void mt7601u_mac_work(struct work_struct *work);
void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
int ht_mode);
void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
void
mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
/* TX */
void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params);
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
void mt7601u_tx_stat(struct work_struct *work);
/* util */
void mt76_remove_hdr_pad(struct sk_buff *skb);
int mt76_insert_hdr_pad(struct sk_buff *skb);
u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
{
return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
}
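/* mt7601u_rmc() with mask 1 copies @below into bit 0 of MT_TX_BAND_CFG
 * (MT_TX_BAND_CFG_UPPER_40M), i.e. the bit is set when the control
 * channel sits below the extension channel.
 */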
int mt7601u_dma_init(struct mt7601u_dev *dev);
void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
struct mt76_wcid *wcid, int hw_q);
#endif

File diff not shown because it is too large

View File

@ -0,0 +1,636 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT76_REGS_H
#define __MT76_REGS_H
#include <linux/bitops.h>
#ifndef GENMASK
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#endif
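/* e.g. GENMASK(5, 0) == 0x3f - a contiguous mask covering bits 5..0 */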
#define MT_ASIC_VERSION 0x0000
#define MT76XX_REV_E3 0x22
#define MT76XX_REV_E4 0x33
#define MT_CMB_CTRL 0x0020
#define MT_CMB_CTRL_XTAL_RDY BIT(22)
#define MT_CMB_CTRL_PLL_LD BIT(23)
#define MT_EFUSE_CTRL 0x0024
#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
#define MT_EFUSE_CTRL_KICK BIT(30)
#define MT_EFUSE_CTRL_SEL BIT(31)
#define MT_EFUSE_DATA_BASE 0x0028
#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
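/* e.g. MT_EFUSE_DATA(2) == 0x0030 - 32-bit words following MT_EFUSE_DATA_BASE */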
#define MT_COEXCFG0 0x0040
#define MT_COEXCFG0_COEX_EN BIT(0)
#define MT_WLAN_FUN_CTRL 0x0080
#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
#define MT_XO_CTRL0 0x0100
#define MT_XO_CTRL1 0x0104
#define MT_XO_CTRL2 0x0108
#define MT_XO_CTRL3 0x010c
#define MT_XO_CTRL4 0x0110
#define MT_XO_CTRL5 0x0114
#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
#define MT_XO_CTRL6 0x0118
#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
#define MT_XO_CTRL7 0x011c
#define MT_WLAN_MTC_CTRL 0x10148
#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
#define MT_INT_SOURCE_CSR 0x0200
#define MT_INT_MASK_CSR 0x0204
#define MT_INT_RX_DONE(_n) BIT(_n)
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
#define MT_INT_TX_DONE(_n) BIT(_n + 4)
#define MT_INT_RX_COHERENT BIT(16)
#define MT_INT_TX_COHERENT BIT(17)
#define MT_INT_ANY_COHERENT BIT(18)
#define MT_INT_MCU_CMD BIT(19)
#define MT_INT_TBTT BIT(20)
#define MT_INT_PRE_TBTT BIT(21)
#define MT_INT_TX_STAT BIT(22)
#define MT_INT_AUTO_WAKEUP BIT(23)
#define MT_INT_GPTIMER BIT(24)
#define MT_INT_RXDELAYINT BIT(26)
#define MT_INT_TXDELAYINT BIT(27)
#define MT_WPDMA_GLO_CFG 0x0208
#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
#define MT_WPDMA_RST_IDX 0x020c
#define MT_WPDMA_DELAY_INT_CFG 0x0210
#define MT_WMM_AIFSN 0x0214
#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
#define MT_WMM_CWMIN 0x0218
#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
#define MT_WMM_CWMAX 0x021c
#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
#define MT_WMM_TXOP_BASE 0x0220
#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
#define MT_WMM_TXOP_MASK GENMASK(15, 0)
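/* TXOP values are 16 bits wide, so two ACs share each register:
 * e.g. MT_WMM_TXOP(2) == 0x0224 with shift 0, MT_WMM_TXOP(3) == 0x0224
 * with shift 16.
 */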
#define MT_FCE_DMA_ADDR 0x0230
#define MT_FCE_DMA_LEN 0x0234
#define MT_USB_DMA_CFG 0x238
#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
#define MT_USB_DMA_CFG_PHY_CLR BIT(16)
#define MT_USB_DMA_CFG_TX_CLR BIT(19)
#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 27)
#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
#define MT_TSO_CTRL 0x0250
#define MT_HEADER_TRANS_CTRL_REG 0x0260
#define MT_US_CYC_CFG 0x02a4
#define MT_US_CYC_CNT GENMASK(7, 0)
#define MT_TX_RING_BASE 0x0300
#define MT_RX_RING_BASE 0x03c0
#define MT_RING_SIZE 0x10
#define MT_TX_HW_QUEUE_MCU 8
#define MT_TX_HW_QUEUE_MGMT 9
#define MT_PBF_SYS_CTRL 0x0400
#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
#define MT_PBF_CFG 0x0404
#define MT_PBF_CFG_TX0Q_EN BIT(0)
#define MT_PBF_CFG_TX1Q_EN BIT(1)
#define MT_PBF_CFG_TX2Q_EN BIT(2)
#define MT_PBF_CFG_TX3Q_EN BIT(3)
#define MT_PBF_CFG_RX0Q_EN BIT(4)
#define MT_PBF_CFG_RX_DROP_EN BIT(8)
#define MT_PBF_TX_MAX_PCNT 0x0408
#define MT_PBF_RX_MAX_PCNT 0x040c
#define MT_BCN_OFFSET_BASE 0x041c
#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
#define MT_RF_CSR_CFG 0x0500
#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
#define MT_RF_CSR_CFG_WR BIT(30)
#define MT_RF_CSR_CFG_KICK BIT(31)
#define MT_RF_BYPASS_0 0x0504
#define MT_RF_BYPASS_1 0x0508
#define MT_RF_SETTING_0 0x050c
#define MT_RF_DATA_WRITE 0x0524
#define MT_RF_CTRL 0x0528
#define MT_RF_CTRL_ADDR GENMASK(11, 0)
#define MT_RF_CTRL_WRITE BIT(12)
#define MT_RF_CTRL_BUSY BIT(13)
#define MT_RF_CTRL_IDX BIT(16)
#define MT_RF_DATA_READ 0x052c
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
#define MT_FCE_CSO 0x0808
#define MT_FCE_L2_STUFF 0x080c
#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
#define MT_FCE_SKIP_FS 0x0a6c
#define MT_MAC_CSR0 0x1000
#define MT_MAC_SYS_CTRL 0x1004
#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
#define MT_MAC_ADDR_DW0 0x1008
#define MT_MAC_ADDR_DW1 0x100c
#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
#define MT_MAC_BSSID_DW0 0x1010
#define MT_MAC_BSSID_DW1 0x1014
#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
#define MT_MAX_LEN_CFG 0x1018
#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
#define MT_BBP_CSR_CFG 0x101c
#define MT_BBP_CSR_CFG_VAL GENMASK(7, 0)
#define MT_BBP_CSR_CFG_REG_NUM GENMASK(15, 8)
#define MT_BBP_CSR_CFG_READ BIT(16)
#define MT_BBP_CSR_CFG_BUSY BIT(17)
#define MT_BBP_CSR_CFG_PAR_DUR BIT(18)
#define MT_BBP_CSR_CFG_RW_MODE BIT(19)
#define MT_AMPDU_MAX_LEN_20M1S 0x1030
#define MT_AMPDU_MAX_LEN_20M2S 0x1034
#define MT_AMPDU_MAX_LEN_40M1S 0x1038
#define MT_AMPDU_MAX_LEN_40M2S 0x103c
#define MT_AMPDU_MAX_LEN 0x1040
#define MT_WCID_DROP_BASE 0x106c
#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
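/* One drop bit per WCID, 32 WCIDs per register:
 * e.g. WCID 40 maps to MT_WCID_DROP_BASE + 4, mask BIT(8).
 */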
#define MT_BCN_BYPASS_MASK 0x108c
#define MT_MAC_APC_BSSID_BASE 0x1090
#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
#define MT_MAC_APC_BSSID0_H_EN BIT(16)
#define MT_XIFS_TIME_CFG 0x1100
#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
#define MT_BKOFF_SLOT_CFG 0x1104
#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
#define MT_BEACON_TIME_CFG 0x1114
#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
#define MT_TBTT_SYNC_CFG 0x1118
#define MT_TBTT_TIMER_CFG 0x1124
#define MT_INT_TIMER_CFG 0x1128
#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
#define MT_INT_TIMER_EN 0x112c
#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
#define MT_MAC_STATUS 0x1200
#define MT_MAC_STATUS_TX BIT(0)
#define MT_MAC_STATUS_RX BIT(1)
#define MT_PWR_PIN_CFG 0x1204
#define MT_AUX_CLK_CFG 0x120c
#define MT_BB_PA_MODE_CFG0 0x1214
#define MT_BB_PA_MODE_CFG1 0x1218
#define MT_RF_PA_MODE_CFG0 0x121c
#define MT_RF_PA_MODE_CFG1 0x1220
#define MT_RF_PA_MODE_ADJ0 0x1228
#define MT_RF_PA_MODE_ADJ1 0x122c
#define MT_DACCLK_EN_DLY_CFG 0x1264
#define MT_EDCA_CFG_BASE 0x1300
#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
#define MT_TX_PWR_CFG_0 0x1314
#define MT_TX_PWR_CFG_1 0x1318
#define MT_TX_PWR_CFG_2 0x131c
#define MT_TX_PWR_CFG_3 0x1320
#define MT_TX_PWR_CFG_4 0x1324
#define MT_TX_BAND_CFG 0x132c
#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
#define MT_TX_BAND_CFG_5G BIT(1)
#define MT_TX_BAND_CFG_2G BIT(2)
#define MT_HT_FBK_TO_LEGACY 0x1384
#define MT_TX_MPDU_ADJ_INT 0x1388
#define MT_TX_PWR_CFG_7 0x13d4
#define MT_TX_PWR_CFG_8 0x13d8
#define MT_TX_PWR_CFG_9 0x13dc
#define MT_TX_SW_CFG0 0x1330
#define MT_TX_SW_CFG1 0x1334
#define MT_TX_SW_CFG2 0x1338
#define MT_TXOP_CTRL_CFG 0x1340
#define MT_TXOP_TRUN_EN GENMASK(5, 0)
#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
#define MT_TXOP_CTRL
#define MT_TX_RTS_CFG 0x1344
#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
#define MT_TX_RTS_FALLBACK BIT(24)
#define MT_TX_TIMEOUT_CFG 0x1348
#define MT_TX_RETRY_CFG 0x134c
#define MT_TX_LINK_CFG 0x1350
#define MT_HT_FBK_CFG0 0x1354
#define MT_HT_FBK_CFG1 0x1358
#define MT_LG_FBK_CFG0 0x135c
#define MT_LG_FBK_CFG1 0x1360
#define MT_CCK_PROT_CFG 0x1364
#define MT_OFDM_PROT_CFG 0x1368
#define MT_MM20_PROT_CFG 0x136c
#define MT_MM40_PROT_CFG 0x1370
#define MT_GF20_PROT_CFG 0x1374
#define MT_GF40_PROT_CFG 0x1378
#define MT_PROT_RATE GENMASK(15, 0)
#define MT_PROT_CTRL_RTS_CTS BIT(16)
#define MT_PROT_CTRL_CTS2SELF BIT(17)
#define MT_PROT_NAV_SHORT BIT(18)
#define MT_PROT_NAV_LONG BIT(19)
#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
#define MT_PROT_RTS_THR_EN BIT(26)
#define MT_PROT_RATE_CCK_11 0x0003
#define MT_PROT_RATE_OFDM_6 0x4000
#define MT_PROT_RATE_OFDM_24 0x4004
#define MT_PROT_RATE_DUP_OFDM_24 0x4084
#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
~MT_PROT_TXOP_ALLOW_MM40 & \
~MT_PROT_TXOP_ALLOW_GF40)
#define MT_EXP_ACK_TIME 0x1380
#define MT_TX_PWR_CFG_0_EXT 0x1390
#define MT_TX_PWR_CFG_1_EXT 0x1394
#define MT_TX_FBK_LIMIT 0x1398
#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
#define MT_TX0_RF_GAIN_CORR 0x13a0
#define MT_TX1_RF_GAIN_CORR 0x13a4
#define MT_TX0_RF_GAIN_ATTEN 0x13a8
#define MT_TX_ALC_CFG_0 0x13b0
#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
#define MT_TX_ALC_CFG_1 0x13b4
#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
#define MT_TX_ALC_CFG_2 0x13a8
#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
#define MT_TX0_BB_GAIN_ATTEN 0x13c0
#define MT_TX_ALC_VGA3 0x13c8
#define MT_TX_PROT_CFG6 0x13e0
#define MT_TX_PROT_CFG7 0x13e4
#define MT_TX_PROT_CFG8 0x13e8
#define MT_PIFS_TX_CFG 0x13ec
#define MT_RX_FILTR_CFG 0x1400
#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
#define MT_RX_FILTR_CFG_PROMISC BIT(2)
#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
#define MT_RX_FILTR_CFG_MCAST BIT(5)
#define MT_RX_FILTR_CFG_BCAST BIT(6)
#define MT_RX_FILTR_CFG_DUP BIT(7)
#define MT_RX_FILTR_CFG_CFACK BIT(8)
#define MT_RX_FILTR_CFG_CFEND BIT(9)
#define MT_RX_FILTR_CFG_ACK BIT(10)
#define MT_RX_FILTR_CFG_CTS BIT(11)
#define MT_RX_FILTR_CFG_RTS BIT(12)
#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
#define MT_RX_FILTR_CFG_BA BIT(14)
#define MT_RX_FILTR_CFG_BAR BIT(15)
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
#define MT_AUTO_RSP_CFG 0x1404
#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
#define MT_RX_PARSER_CFG 0x1418
#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
#define MT_EXT_CCA_CFG 0x141c
#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
#define MT_TX_SW_CFG3 0x1478
#define MT_PN_PAD_MODE 0x150c
#define MT_TXOP_HLDR_ET 0x1608
#define MT_PROT_AUTO_TX_CFG 0x1648
#define MT_RX_STA_CNT0 0x1700
#define MT_RX_STA_CNT1 0x1704
#define MT_RX_STA_CNT2 0x1708
#define MT_TX_STA_CNT0 0x170c
#define MT_TX_STA_CNT1 0x1710
#define MT_TX_STA_CNT2 0x1714
/* Vendor driver defines content of the second word of STAT_FIFO as follows:
* MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
* MT_TX_STAT_FIFO_ETXBF BIT(27)
* MT_TX_STAT_FIFO_SND BIT(28)
* MT_TX_STAT_FIFO_ITXBF BIT(29)
* However, tests show that b16-31 have the same layout as TXWI rate_ctl
* with rate set to rate at which frame was acked.
*/
#define MT_TX_STAT_FIFO 0x1718
#define MT_TX_STAT_FIFO_VALID BIT(0)
#define MT_TX_STAT_FIFO_PID_TYPE GENMASK(4, 1)
#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
#define MT_TX_STAT_FIFO_AGGR BIT(6)
#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
#define MT_TX_AGG_STAT 0x171c
#define MT_TX_AGG_CNT_BASE0 0x1720
#define MT_MPDU_DENSITY_CNT 0x1740
#define MT_TX_AGG_CNT_BASE1 0x174c
#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
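/* Counters 0-7 sit at 0x1720..0x173c, the rest continue at 0x174c:
 * e.g. MT_TX_AGG_CNT(9) == 0x1750.
 */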
#define MT_TX_STAT_FIFO_EXT 0x1798
#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
#define MT_BBP_CORE_BASE 0x2000
#define MT_BBP_IBI_BASE 0x2100
#define MT_BBP_AGC_BASE 0x2300
#define MT_BBP_TXC_BASE 0x2400
#define MT_BBP_RXC_BASE 0x2500
#define MT_BBP_TXO_BASE 0x2600
#define MT_BBP_TXBE_BASE 0x2700
#define MT_BBP_RXFE_BASE 0x2800
#define MT_BBP_RXO_BASE 0x2900
#define MT_BBP_DFS_BASE 0x2a00
#define MT_BBP_TR_BASE 0x2b00
#define MT_BBP_CAL_BASE 0x2c00
#define MT_BBP_DSC_BASE 0x2e00
#define MT_BBP_PFMU_BASE 0x2f00
#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
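/* e.g. MT_BBP(AGC, 8) == MT_BBP_AGC_BASE + (8 << 2) == 0x2320 */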
#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
/* AGC, R4/R5 */
#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
/* AGC, R8/R9 */
#define MT_BBP_AGC_GAIN GENMASK(14, 8)
#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
#define MT_WCID_ADDR_BASE 0x1800
#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
#define MT_SRAM_BASE 0x4000
#define MT_WCID_KEY_BASE 0x8000
#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
#define MT_WCID_IV_BASE 0xa000
#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
#define MT_WCID_ATTR_BASE 0xa800
#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
#define MT_WCID_ATTR_PAIRWISE BIT(0)
#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
#define MT_SKEY_BASE_0 0xac00
#define MT_SKEY_BASE_1 0xb400
#define MT_SKEY_0(_bss, _idx) \
(MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
#define MT_SKEY_1(_bss, _idx) \
(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
#define MT_SKEY(_bss, _idx) \
((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
#define MT_SKEY_MODE_BASE_0 0xb000
#define MT_SKEY_MODE_BASE_1 0xb3f0
#define MT_SKEY_MODE_0(_bss) \
(MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
#define MT_SKEY_MODE_1(_bss) \
(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
#define MT_SKEY_MODE(_bss) \
((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
#define MT_SKEY_MODE_MASK GENMASK(3, 0)
#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
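/* Key modes are 4-bit nibbles - keys 0-3 of two consecutive BSSes share
 * one 32-bit register, hence the /2 in the address and the shift above.
 */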
#define MT_BEACON_BASE 0xc000
#define MT_TEMP_SENSOR 0x1d000
#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
enum mt76_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_WEP104,
MT_CIPHER_TKIP,
MT_CIPHER_AES_CCMP,
MT_CIPHER_CKIP40,
MT_CIPHER_CKIP104,
MT_CIPHER_CKIP128,
MT_CIPHER_WAPI,
};
#endif

View File

@ -0,0 +1,21 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif

View File

@ -0,0 +1,400 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __MT7601U_TRACE_H
#include <linux/tracepoint.h>
#include "mt7601u.h"
#include "mac.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mt7601u
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
wiphy_name(dev->hw->wiphy), MAXNAME)
#define DEV_PR_FMT "%s "
#define DEV_PR_ARG __entry->wiphy_name
#define REG_ENTRY __field(u32, reg) __field(u32, val)
#define REG_ASSIGN __entry->reg = reg; __entry->val = val
#define REG_PR_FMT "%04x=%08x"
#define REG_PR_ARG __entry->reg, __entry->val
DECLARE_EVENT_CLASS(dev_reg_evt,
TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val),
TP_STRUCT__entry(
DEV_ENTRY
REG_ENTRY
),
TP_fast_assign(
DEV_ASSIGN;
REG_ASSIGN;
),
TP_printk(
DEV_PR_FMT REG_PR_FMT,
DEV_PR_ARG, REG_PR_ARG
)
);
DEFINE_EVENT(dev_reg_evt, reg_read,
TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val)
);
DEFINE_EVENT(dev_reg_evt, reg_write,
TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val)
);
TRACE_EVENT(mt_submit_urb,
TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
TP_ARGS(dev, u),
TP_STRUCT__entry(
DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->pipe = u->pipe;
__entry->len = u->transfer_buffer_length;
),
TP_printk(DEV_PR_FMT "p:%08x len:%u",
DEV_PR_ARG, __entry->pipe, __entry->len)
);
#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \
struct urb u; \
u.pipe = __pipe; \
u.transfer_buffer_length = __len; \
trace_mt_submit_urb(__dev, &u); \
})
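/* Synchronous vendor requests have no real URB, so a dummy struct urb is
 * filled on the stack purely to reuse the mt_submit_urb event above.
 */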
TRACE_EVENT(mt_mcu_msg_send,
TP_PROTO(struct mt7601u_dev *dev,
struct sk_buff *skb, u32 csum, bool resp),
TP_ARGS(dev, skb, csum, resp),
TP_STRUCT__entry(
DEV_ENTRY
__field(u32, info)
__field(u32, csum)
__field(bool, resp)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->info = *(u32 *)skb->data;
__entry->csum = csum;
__entry->resp = resp;
),
TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
);
TRACE_EVENT(mt_vend_req,
TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
u16 val, u16 offset, void *buf, size_t buflen, int ret),
TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
TP_STRUCT__entry(
DEV_ENTRY
__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
__field(u16, val) __field(u16, offset) __field(void*, buf)
__field(int, buflen) __field(int, ret)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->pipe = pipe;
__entry->req = req;
__entry->req_type = req_type;
__entry->val = val;
__entry->offset = offset;
__entry->buf = buf;
__entry->buflen = buflen;
__entry->ret = ret;
),
TP_printk(DEV_PR_FMT
"%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
__entry->req_type, __entry->val, __entry->offset,
!!__entry->buf, __entry->buflen)
);
TRACE_EVENT(ee_read,
TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
TP_ARGS(dev, offset, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(int, o) __field(u16, v)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->o = offset;
__entry->v = val;
),
TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
);
DECLARE_EVENT_CLASS(dev_rf_reg_evt,
TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
TP_ARGS(dev, bank, reg, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, bank)
__field(u8, reg)
__field(u8, val)
),
TP_fast_assign(
DEV_ASSIGN;
REG_ASSIGN;
__entry->bank = bank;
),
TP_printk(
DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
)
);
DEFINE_EVENT(dev_rf_reg_evt, rf_read,
TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
TP_ARGS(dev, bank, reg, val)
);
DEFINE_EVENT(dev_rf_reg_evt, rf_write,
TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
TP_ARGS(dev, bank, reg, val)
);
DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
TP_ARGS(dev, reg, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, reg)
__field(u8, val)
),
TP_fast_assign(
DEV_ASSIGN;
REG_ASSIGN;
),
TP_printk(
DEV_PR_FMT "%02hhx=%02hhx",
DEV_PR_ARG, __entry->reg, __entry->val
)
);
DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
TP_ARGS(dev, reg, val)
);
DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
TP_ARGS(dev, reg, val)
);
DECLARE_EVENT_CLASS(dev_simple_evt,
TP_PROTO(struct mt7601u_dev *dev, u8 val),
TP_ARGS(dev, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, val)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->val = val;
),
TP_printk(
DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
)
);
DEFINE_EVENT(dev_simple_evt, temp_mode,
TP_PROTO(struct mt7601u_dev *dev, u8 val),
TP_ARGS(dev, val)
);
DEFINE_EVENT(dev_simple_evt, read_temp,
TP_PROTO(struct mt7601u_dev *dev, u8 val),
TP_ARGS(dev, val)
);
DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
TP_PROTO(struct mt7601u_dev *dev, u8 val),
TP_ARGS(dev, val)
);
TRACE_EVENT(freq_cal_offset,
TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
TP_ARGS(dev, phy_mode, freq_off),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, phy_mode)
__field(s8, freq_off)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->phy_mode = phy_mode;
__entry->freq_off = freq_off;
),
TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
);
TRACE_EVENT(mt_rx,
TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
TP_ARGS(dev, rxwi, f),
TP_STRUCT__entry(
DEV_ENTRY
__field_struct(struct mt7601u_rxwi, rxwi)
__field(u32, fce_info)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->rxwi = *rxwi;
__entry->fce_info = f;
),
TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
"uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
"ant:%02hhx gain:%02hhx freq_o:%02hhx "
"r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
le32_to_cpu(__entry->rxwi.rxinfo),
le32_to_cpu(__entry->rxwi.ctl),
le16_to_cpu(__entry->rxwi.frag_sn),
le16_to_cpu(__entry->rxwi.rate),
__entry->rxwi.unknown,
__entry->rxwi.zero[0], __entry->rxwi.zero[1],
__entry->rxwi.zero[2],
__entry->rxwi.snr, __entry->rxwi.ant,
__entry->rxwi.gain, __entry->rxwi.freq_off,
__entry->rxwi.resv2, __entry->rxwi.expert_ant,
__entry->fce_info)
);
TRACE_EVENT(mt_tx,
TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
struct mt76_sta *sta, struct mt76_txwi *h),
TP_ARGS(dev, skb, sta, h),
TP_STRUCT__entry(
DEV_ENTRY
__field_struct(struct mt76_txwi, h)
__field(struct sk_buff *, skb)
__field(struct mt76_sta *, sta)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->h = *h;
__entry->skb = skb;
__entry->sta = sta;
),
TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
"ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
__entry->skb, __entry->sta,
le16_to_cpu(__entry->h.flags),
le16_to_cpu(__entry->h.rate_ctl),
__entry->h.ack_ctl, __entry->h.wcid,
le16_to_cpu(__entry->h.len_ctl))
);
TRACE_EVENT(mt_tx_dma_done,
TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
TP_ARGS(dev, skb),
TP_STRUCT__entry(
DEV_ENTRY
__field(struct sk_buff *, skb)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->skb = skb;
),
TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
);
TRACE_EVENT(mt_tx_status_cleaned,
TP_PROTO(struct mt7601u_dev *dev, int cleaned),
TP_ARGS(dev, cleaned),
TP_STRUCT__entry(
DEV_ENTRY
__field(int, cleaned)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->cleaned = cleaned;
),
TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
);
TRACE_EVENT(mt_tx_status,
TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
TP_ARGS(dev, stat1, stat2),
TP_STRUCT__entry(
DEV_ENTRY
__field(u32, stat1) __field(u32, stat2)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->stat1 = stat1;
__entry->stat2 = stat2;
),
TP_printk(DEV_PR_FMT "%08x %08x",
DEV_PR_ARG, __entry->stat1, __entry->stat2)
);
TRACE_EVENT(mt_rx_dma_aggr,
TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
TP_ARGS(dev, cnt, paged),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, cnt)
__field(bool, paged)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->cnt = cnt;
__entry->paged = paged;
),
TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
DEV_PR_ARG, __entry->cnt, __entry->paged)
);
DEFINE_EVENT(dev_simple_evt, set_key,
TP_PROTO(struct mt7601u_dev *dev, u8 val),
TP_ARGS(dev, val)
);
TRACE_EVENT(set_shared_key,
TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
TP_ARGS(dev, vid, key),
TP_STRUCT__entry(
DEV_ENTRY
__field(u8, vid)
__field(u8, key)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->vid = vid;
__entry->key = key;
),
TP_printk(DEV_PR_FMT "vid:%02hhx key:%02hhx",
DEV_PR_ARG, __entry->vid, __entry->key)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

View File

@ -0,0 +1,319 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mt7601u.h"
#include "trace.h"
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
MT_TXQ_VI = IEEE80211_AC_VI,
MT_TXQ_BE = IEEE80211_AC_BE,
MT_TXQ_BK = IEEE80211_AC_BK,
MT_TXQ_PSD,
MT_TXQ_MCU,
__MT_TXQ_MAX
};
/* Hardware uses mirrored order of queues with Q0 having the highest priority */
static u8 q2hwq(u8 q)
{
return q ^ 0x3;
}
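/* i.e. VO(0) <-> 3, VI(1) <-> 2, BE(2) <-> 1, BK(3) <-> 0 */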
/* Take mac80211 Q id from the skb and translate it to hardware Q id */
static u8 skb2q(struct sk_buff *skb)
{
int qid = skb_get_queue_mapping(skb);
if (WARN_ON(qid >= MT_TXQ_PSD)) {
qid = MT_TXQ_BE;
skb_set_queue_mapping(skb, qid);
}
return q2hwq(qid);
}
/* Note: TX retry reporting is a bit broken.
* Retries are reported only once per AMPDU and often come a frame early
* i.e. they are reported in the last status preceding the AMPDU. Apart
* from the fact that it's hard to know the length of the AMPDU (which is
* required to know how many consecutive frames the retries apply to),
* if a status arrives early on a full FIFO it gets lost and the retries
* of the whole AMPDU become invisible.
* As a work-around encode the desired rate in PKT_ID of TX descriptor
* and based on that guess the retries (every rate is tried once).
* Only downside here is that for MCS0 we have to rely solely on
* transmission failures as no retries can ever be reported.
* Not having to read EXT_FIFO has a nice effect of doubling the number
* of reports which can be fetched.
* Also the vendor driver never uses the EXT_FIFO register so it may be
* undertested.
*/
static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
{
u8 encoded = (rate + 1) + is_probe * 8;
/* Because PKT_ID 0 disables status reporting, only 15 values are
* available but 16 are needed (8 MCS * 2 for encoding is_probe)
* - we need to cram together two rates. MCS0 and MCS7 with is_probe
* share PKT_ID 9.
*/
if (is_probe && rate == 7)
return encoded - 7;
return encoded;
}
static void
mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
{
u8 req_rate = stat->pktid;
u8 eff_rate = stat->rate & 0x7;
req_rate -= 1;
if (req_rate > 7) {
stat->is_probe = true;
req_rate -= 8;
/* Decide between MCS0 and MCS7 which share pktid 9 */
if (!req_rate && eff_rate)
req_rate = 7;
}
stat->retry = req_rate - eff_rate;
}
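/* Round-trip example: an MCS3 probe frame is encoded as PKT_ID 12; on
 * decode 12 - 1 = 11 > 7 marks it as a probe and 11 - 8 = 3 recovers the
 * requested rate, so a frame acked at MCS1 reports retry = 3 - 1 = 2.
 */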
static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
struct ieee80211_tx_info *info)
{
int pkt_len = (unsigned long)info->status.status_driver_data[0];
skb_pull(skb, sizeof(struct mt76_txwi) + 4);
if (ieee80211_get_hdrlen_from_skb(skb) % 4)
mt76_remove_hdr_pad(skb);
skb_trim(skb, pkt_len);
}
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
mt7601u_tx_skb_remove_dma_overhead(skb, info);
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(dev->hw, skb);
}
static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
{
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u32 need_head;
need_head = sizeof(struct mt76_txwi) + 4;
if (hdr_len % 4)
need_head += 2;
return skb_cow(skb, need_head);
}
static struct mt76_txwi *
mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, struct mt76_wcid *wcid,
int pkt_len)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct mt76_txwi *txwi;
unsigned long flags;
bool is_probe;
u32 pkt_id;
u16 rate_ctl;
u8 nss;
txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
memset(txwi, 0, sizeof(*txwi));
if (!wcid->tx_rate_set)
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
spin_lock_irqsave(&dev->lock, flags);
if (rate->idx < 0 || !rate->count)
rate_ctl = wcid->tx_rate;
else
rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
spin_unlock_irqrestore(&dev->lock, flags);
txwi->rate_ctl = cpu_to_le16(rate_ctl);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
ba_size <<= sta->ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
sta->ht_cap.ampdu_density));
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->flags = 0;
}
txwi->wcid = wcid->idx;
is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
txwi->len_ctl = cpu_to_le16(pkt_len);
return txwi;
}
void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt7601u_dev *dev = hw->priv;
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
struct mt76_sta *msta = NULL;
struct mt76_wcid *wcid = dev->mon_wcid;
struct mt76_txwi *txwi;
int pkt_len = skb->len;
int hw_q = skb2q(skb);
BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
ieee80211_free_txskb(dev->hw, skb);
return;
}
if (sta) {
msta = (struct mt76_sta *) sta->drv_priv;
wcid = &msta->wcid;
} else if (vif) {
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
wcid = &mvif->group_wcid;
}
txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
return;
trace_mt_tx(dev, skb, msta, txwi);
}
void mt7601u_tx_stat(struct work_struct *work)
{
struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
stat_work.work);
struct mt76_tx_status stat;
unsigned long flags;
int cleaned = 0;
while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
stat = mt7601u_mac_fetch_tx_status(dev);
if (!stat.valid)
break;
mt7601u_tx_pktid_dec(dev, &stat);
mt76_send_tx_status(dev, &stat);
cleaned++;
}
trace_mt_tx_status_cleaned(dev, cleaned);
spin_lock_irqsave(&dev->tx_lock, flags);
if (cleaned)
queue_delayed_work(dev->stat_wq, &dev->stat_work,
msecs_to_jiffies(10));
else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
queue_delayed_work(dev->stat_wq, &dev->stat_work,
msecs_to_jiffies(20));
else
clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params)
{
struct mt7601u_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
u32 val;
/* TODO: should we do funny things with the parameters?
* See what mt7601u_set_default_edca() used to do in init.c.
*/
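/* Hardware stores contention windows as exponents (CW = 2^n - 1),
 * hence the fls() calls below.
 */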
if (params->cw_min)
cw_min = fls(params->cw_min);
if (params->cw_max)
cw_max = fls(params->cw_max);
WARN_ON(params->txop > 0xff);
WARN_ON(params->aifs > 0xf);
WARN_ON(cw_min > 0xf);
WARN_ON(cw_max > 0xf);
val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
/* TODO: based on user-controlled EnableTxBurst var vendor drv sets
* a really long txop on AC0 (see connect.c:2009) but only on
* connect? When not connected should be 0.
*/
if (!hw_q)
val |= 0x60;
else
val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_AIFSN, val);
val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_CWMIN, val);
val = mt76_rr(dev, MT_WMM_CWMAX);
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_CWMAX, val);
return 0;
}

Some files were not shown because too many files changed in this diff