Merge tag 'wireless-drivers-next-for-davem-2017-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.11

Mostly smaller changes and fixes all over, nothing really major
standing out.

Major changes:

iwlwifi

* work on support for new A000 devices continues
* fix 802.11w, which was failing due to an IGTK bug

ath10k

* add debugfs file peer_debug_trigger for debugging firmware
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-02-10 13:47:52 -05:00
Parents dc371700d4 cbda794cf1
Commit e3f29c4809
122 changed files: 4702 additions and 3554 deletions

View file

@ -3,6 +3,7 @@ config ATH10K
depends on MAC80211 && HAS_DMA
select ATH_COMMON
select CRC32
select WANT_DEV_COREDUMP
---help---
This module adds support for wireless adapters based on
Atheros IEEE 802.11ac family of chipsets.

View file

@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
#define QCA4019_SRAM_ADDR 0x000C0000
#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */
static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@ -699,6 +702,25 @@ out:
return ret;
}
static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
if (region >= QCA4019_SRAM_ADDR && region <=
(QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
/* SRAM contents for QCA4019 can be directly accessed and
* no conversions are required
*/
val |= region;
} else {
val |= 0x100000 | region;
}
return val;
}
static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->bus_ops = &ath10k_ahb_bus_ops;
ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ret = ath10k_pci_setup_resource(ar);
if (ret) {

View file

@ -959,9 +959,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
*/
dest_ring->base_addr_owner_space_unaligned =
dma_zalloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);

View file

@ -1996,7 +1996,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
mode == ATH10K_FIRMWARE_MODE_NORMAL) {
val = 0;
if (ath10k_peer_stats_enabled(ar))
val = WMI_10_4_PEER_STATS;
@ -2049,10 +2050,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* possible to implicitly make it correct by creating a dummy vdev and
* then deleting it.
*/
status = ath10k_core_reset_rx_filter(ar);
if (status) {
ath10k_err(ar, "failed to reset rx filter: %d\n", status);
goto err_hif_stop;
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_core_reset_rx_filter(ar);
if (status) {
ath10k_err(ar,
"failed to reset rx filter: %d\n", status);
goto err_hif_stop;
}
}
/* If firmware indicates Full Rx Reorder support it must be used in a

View file

@ -306,6 +306,69 @@ static const struct file_operations fops_delba = {
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[8];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len,
"Write 1 to once trigger the debug logs\n");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t
ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u8 peer_debug_trigger;
int ret;
if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
return -EINVAL;
if (peer_debug_trigger != 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
WMI_PEER_DEBUG, peer_debug_trigger);
if (ret) {
ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
ret);
goto out;
}
out:
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_peer_debug_trigger = {
.open = simple_open,
.read = ath10k_dbg_sta_read_peer_debug_trigger,
.write = ath10k_dbg_sta_write_peer_debug_trigger,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@ -314,4 +377,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
&fops_peer_debug_trigger);
}
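For reference, a minimal userspace sketch of how the new file is meant to be used. The debugfs path is an assumption (it depends on the phy name, netdev and peer MAC address), and per the write handler above only the value 1 is accepted.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust phy, netdev and station MAC. */
	const char *path = "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
			   "stations/00:11:22:33:44:55/peer_debug_trigger";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The handler rejects anything but 1 with -EINVAL. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}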

View file

@ -1636,7 +1636,7 @@ struct ath10k_htt {
int size;
/* size - 1 */
unsigned size_mask;
unsigned int size_mask;
/* how many rx buffers to keep in the ring */
int fill_level;
@ -1657,7 +1657,7 @@ struct ath10k_htt {
/* where HTT SW has processed bufs filled by rx MAC DMA */
struct {
unsigned msdu_payld;
unsigned int msdu_payld;
} sw_rd_idx;
/*
@ -1820,7 +1820,7 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
int ath10k_htt_tx(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu);

View file

@ -2492,7 +2492,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
};
}
return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

View file

@ -840,29 +840,33 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
& 0x7ff) << 21;
val |= 0x100000 | region;
return val;
}
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
val |= 0x100000 | region;
return val;
}
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS) &
0x7ff) << 21;
break;
case ATH10K_HW_QCA9888:
case ATH10K_HW_QCA99X0:
case ATH10K_HW_QCA9984:
case ATH10K_HW_QCA4019:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
}
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
return -ENOTSUPP;
val |= 0x100000 | (addr & 0xfffff);
return val;
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
/*
@ -1590,7 +1594,7 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
*/
break;
break;
}
}
@ -3170,6 +3174,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
@ -3177,12 +3182,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA9887_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@ -3190,30 +3197,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9984_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9984;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9888_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9888;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@ -3240,6 +3252,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->bus_ops = &ath10k_pci_bus_ops;
ar_pci->pci_soft_reset = pci_soft_reset;
ar_pci->pci_hard_reset = pci_hard_reset;
ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
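The design choice in this file: the target-CPU-to-CE address translation used to be a switch on ar->hw_rev evaluated on every diagnostic access; it is now a callback resolved once at probe time, which also lets the AHB bus (QCA4019) install its own SRAM-aware variant. A reduced standalone model of the pattern; the types, names and the simplified window math are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct dev_priv {
	uint32_t (*targ_cpu_to_ce_addr)(uint32_t addr);	/* set at probe */
};

static uint32_t qca988x_like_convert(uint32_t addr)
{
	/* Keep the low 20 bits as a window offset, set the window bit. */
	return 0x100000 | (addr & 0xfffff);
}

static uint32_t convert(struct dev_priv *priv, uint32_t addr)
{
	if (!priv->targ_cpu_to_ce_addr)		/* WARN_ON_ONCE() upstream */
		return ~0u;
	return priv->targ_cpu_to_ce_addr(addr);
}

int main(void)
{
	struct dev_priv priv = { .targ_cpu_to_ce_addr = qca988x_like_convert };

	printf("0x%08x\n", convert(&priv, 0x00412345)); /* 0x00112345 */
	return 0;
}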

View file

@ -233,6 +233,11 @@ struct ath10k_pci {
/* Chip specific pci full reset function */
int (*pci_hard_reset)(struct ath10k *ar);
/* chip specific methods for converting target CPU virtual address
* space to CE address space
*/
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
/* Keep this entry in the last, memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.

View file

@ -5811,6 +5811,7 @@ enum wmi_peer_param {
WMI_PEER_CHAN_WIDTH = 0x4,
WMI_PEER_NSS = 0x5,
WMI_PEER_USE_4ADDR = 0x6,
WMI_PEER_DEBUG = 0xa,
WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
};
@ -6604,7 +6605,7 @@ struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
u32 cmd_id);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg);
void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
struct ath10k_fw_stats_pdev *dst);

View file

@ -108,7 +108,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_MIN_QDEPTH 2
/* minimum h/w qdepth for non-aggregated traffic */
#define ATH_NON_AGGR_MIN_QDEPTH 8
#define ATH_TX_COMPLETE_POLL_INT 1000
#define ATH_HW_CHECK_POLL_INT 1000
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
@ -745,7 +745,7 @@ void ath9k_csa_update(struct ath_softc *sc);
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
#define ATH_PLL_WORK_INTERVAL 100
void ath_tx_complete_poll_work(struct work_struct *work);
void ath_hw_check_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
bool ath_hw_check(struct ath_softc *sc);
void ath_hw_pll_work(struct work_struct *work);
@ -998,6 +998,7 @@ struct ath_softc {
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
spinlock_t intr_lock;
struct tasklet_struct intr_tq;
struct tasklet_struct bcon_tasklet;
struct ath_hw *sc_ah;
@ -1053,7 +1054,7 @@ struct ath_softc {
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
#endif
struct delayed_work tx_complete_work;
struct delayed_work hw_check_work;
struct delayed_work hw_pll_work;
struct timer_list sleep_timer;

View file

@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
int count = 50;
u32 reg, last_val;
/* Check if chip failed to wake up */
if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
return false;
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);

View file

@ -669,6 +669,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
spin_lock_init(&sc->intr_lock);
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
spin_lock_init(&sc->chan_lock);
@ -681,6 +682,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
INIT_DELAYED_WORK(&sc->hw_check_work, ath_hw_check_work);
ath9k_init_channel_context(sc);

View file

@ -20,20 +20,13 @@
* TX polling - checks if the TX engine is stuck somewhere
* and issues a chip reset if so.
*/
void ath_tx_complete_poll_work(struct work_struct *work)
static bool ath_tx_complete_check(struct ath_softc *sc)
{
struct ath_softc *sc = container_of(work, struct ath_softc,
tx_complete_work.work);
struct ath_txq *txq;
int i;
bool needreset = false;
if (sc->tx99_state) {
ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
"skip tx hung detection on tx99\n");
return;
}
if (sc->tx99_state)
return true;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
@ -41,25 +34,36 @@ void ath_tx_complete_poll_work(struct work_struct *work)
ath_txq_lock(sc, txq);
if (txq->axq_depth) {
if (txq->axq_tx_inprogress) {
needreset = true;
ath_txq_unlock(sc, txq);
break;
} else {
txq->axq_tx_inprogress = true;
goto reset;
}
txq->axq_tx_inprogress = true;
}
ath_txq_unlock(sc, txq);
}
if (needreset) {
ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
"tx hung, resetting the chip\n");
ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
return;
}
return true;
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
reset:
ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
"tx hung, resetting the chip\n");
ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
return false;
}
void ath_hw_check_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_check_work.work);
if (!ath_hw_check(sc) ||
!ath_tx_complete_check(sc))
return;
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
}
/*
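Worth noting: the hang detector kept here is a two-strike heuristic. The first poll that finds a queue non-empty arms axq_tx_inprogress; the TX completion path clears it. If the next poll, one ATH_HW_CHECK_POLL_INT later, still finds the flag armed, no frame completed for a full interval and a chip reset is queued. Condensed from the loop above:

ath_txq_lock(sc, txq);
if (txq->axq_depth) {
	if (txq->axq_tx_inprogress) {	/* second strike: stuck queue */
		ath_txq_unlock(sc, txq);
		goto reset;
	}
	txq->axq_tx_inprogress = true;	/* first strike: arm the check */
}
ath_txq_unlock(sc, txq);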

View file

@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
u32 async_mask;
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
atomic_read(&ah->intr_ref_cnt));
return;
}
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
AR_SREV_9561(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
void ath9k_hw_resume_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
if (atomic_read(&ah->intr_ref_cnt) != 0) {
ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
atomic_read(&ah->intr_ref_cnt));
return;
}
__ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
atomic_read(&ah->intr_ref_cnt));
return;
}
__ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
void ath9k_hw_set_interrupts(struct ath_hw *ah)

View file

@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah);
void ath9k_hw_enable_interrupts(struct ath_hw *ah);
void ath9k_hw_disable_interrupts(struct ath_hw *ah);
void ath9k_hw_kill_interrupts(struct ath_hw *ah);
void ath9k_hw_resume_interrupts(struct ath_hw *ah);
void ar9002_hw_attach_mac_ops(struct ath_hw *ah);

View file

@ -181,7 +181,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
static void __ath_cancel_work(struct ath_softc *sc)
{
cancel_work_sync(&sc->paprd_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@ -198,7 +198,8 @@ void ath_cancel_work(struct ath_softc *sc)
void ath_restart_work(struct ath_softc *sc)
{
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
ATH_HW_CHECK_POLL_INT);
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
@ -373,21 +374,20 @@ void ath9k_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
enum ath_reset_type type;
unsigned long flags;
u32 status = sc->intrstatus;
u32 status;
u32 rxmask;
spin_lock_irqsave(&sc->intr_lock, flags);
status = sc->intrstatus;
sc->intrstatus = 0;
spin_unlock_irqrestore(&sc->intr_lock, flags);
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
if (status & ATH9K_INT_FATAL) {
type = RESET_TYPE_FATAL_INT;
ath9k_queue_reset(sc, type);
/*
* Increment the ref. counter here so that
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
goto out;
}
@ -403,11 +403,6 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
/*
* Increment the ref. counter here so that
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET,
"BB_WATCHDOG: Skipping interrupts\n");
goto out;
@ -420,7 +415,6 @@ void ath9k_tasklet(unsigned long data)
if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
type = RESET_TYPE_TX_GTT;
ath9k_queue_reset(sc, type);
atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, RESET,
"GTT: Skipping interrupts\n");
goto out;
@ -477,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
ath9k_hw_resume_interrupts(ah);
out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
@ -541,7 +535,9 @@ irqreturn_t ath_isr(int irq, void *dev)
return IRQ_NONE;
/* Cache the status */
sc->intrstatus = status;
spin_lock(&sc->intr_lock);
sc->intrstatus |= status;
spin_unlock(&sc->intr_lock);
if (status & SCHED_INTR)
sched = true;
@ -587,7 +583,7 @@ chip_reset:
if (sched) {
/* turn off every interrupt */
ath9k_hw_disable_interrupts(ah);
ath9k_hw_kill_interrupts(ah);
tasklet_schedule(&sc->intr_tq);
}
@ -2091,7 +2087,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
int timeout;
bool drain_txq;
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_check_work);
if (ah->ah_flags & AH_UNPLUGGED) {
ath_dbg(common, ANY, "Device has been unplugged!\n");
@ -2129,7 +2125,8 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
ath9k_ps_restore(sc);
}
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
ATH_HW_CHECK_POLL_INT);
}
static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
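Two fixes in this hunk pair up: the ISR now accumulates status bits under the new intr_lock instead of overwriting them (a second interrupt firing before the tasklet ran previously lost the first set of bits), and the tasklet drains the accumulated set atomically before processing. The pattern, reduced to a kernel-style sketch with stand-in names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct irq_status {
	spinlock_t lock;	/* plays the role of intr_lock */
	u32 pending;
};

/* ISR side: accumulate, never overwrite. */
static void irq_status_post(struct irq_status *s, u32 bits)
{
	spin_lock(&s->lock);
	s->pending |= bits;
	spin_unlock(&s->lock);
}

/* Tasklet side: atomically take and clear the whole set. */
static u32 irq_status_drain(struct irq_status *s)
{
	unsigned long flags;
	u32 bits;

	spin_lock_irqsave(&s->lock, flags);
	bits = s->pending;
	s->pending = 0;
	spin_unlock_irqrestore(&s->lock, flags);

	return bits;
}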

View file

@ -2872,8 +2872,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
return error;
}
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
error = ath_tx_edma_init(sc);

View file

@ -574,6 +574,7 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work)
struct cfg80211_scan_request *req = wcn->scan_req;
u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
struct cfg80211_scan_info scan_info = {};
bool aborted = false;
int i;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
@ -585,6 +586,13 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work)
wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
for (i = 0; i < req->n_channels; i++) {
mutex_lock(&wcn->scan_lock);
aborted = wcn->scan_aborted;
mutex_unlock(&wcn->scan_lock);
if (aborted)
break;
wcn->scan_freq = req->channels[i]->center_freq;
wcn->scan_band = req->channels[i]->band;
@ -596,7 +604,7 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work)
}
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
scan_info.aborted = false;
scan_info.aborted = aborted;
ieee80211_scan_completed(wcn->hw, &scan_info);
mutex_lock(&wcn->scan_lock);
@ -615,6 +623,8 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&wcn->scan_lock);
return -EBUSY;
}
wcn->scan_aborted = false;
wcn->scan_req = &hw_req->req;
mutex_unlock(&wcn->scan_lock);
@ -623,6 +633,18 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
return 0;
}
static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
mutex_lock(&wcn->scan_lock);
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
cancel_work_sync(&wcn->scan_work);
}
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
enum nl80211_band band)
{
@ -1034,6 +1056,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.tx = wcn36xx_tx,
.set_key = wcn36xx_set_key,
.hw_scan = wcn36xx_hw_scan,
.cancel_hw_scan = wcn36xx_cancel_hw_scan,
.bss_info_changed = wcn36xx_bss_info_changed,
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
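The new cancel_hw_scan is cooperative cancellation: it raises a mutex-protected flag and only then waits for the worker, which re-reads the flag at every channel boundary and reports the result through scan_info.aborted. Condensed to the handshake:

/* Canceller: set the flag first, then wait for the worker. */
mutex_lock(&wcn->scan_lock);
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
cancel_work_sync(&wcn->scan_work);

/* Worker: poll the flag between channels, never mid-channel. */
mutex_lock(&wcn->scan_lock);
aborted = wcn->scan_aborted;
mutex_unlock(&wcn->scan_lock);
if (aborted)
	break;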

View file

@ -220,6 +220,7 @@ struct wcn36xx {
int scan_freq;
int scan_band;
struct mutex scan_lock;
bool scan_aborted;
/* DXE channels */
struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */

View file

@ -15,6 +15,7 @@
*/
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include "wil6210.h"
#include "wmi.h"

View file

@ -5908,6 +5908,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
continue;
}
if (channel->orig_flags & IEEE80211_CHAN_DISABLED)
continue;
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
*/
@ -6509,6 +6512,9 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->bands[NL80211_BAND_5GHZ] = band;
}
}
wiphy_read_of_freq_limits(wiphy);
return 0;
}

View file

@ -218,6 +218,22 @@ done:
return err;
}
#ifndef CONFIG_BRCM_TRACING
void __brcmf_err(const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_err("%s: %pV", func, &vaf);
va_end(args);
}
#endif
#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
{

View file

@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
{
void *dump;
size_t ramsize;
int err;
ramsize = brcmf_bus_get_ramsize(bus);
if (ramsize) {
dump = vzalloc(len + ramsize);
if (!dump)
return -ENOMEM;
memcpy(dump, data, len);
brcmf_bus_get_memdump(bus, dump + len, ramsize);
dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
if (!ramsize)
return -ENOTSUPP;
dump = vzalloc(len + ramsize);
if (!dump)
return -ENOMEM;
memcpy(dump, data, len);
err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
if (err) {
vfree(dump);
return err;
}
dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
return 0;
}
@ -49,10 +58,18 @@ static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
const struct brcmf_event_msg *evtmsg,
void *data)
{
int err;
brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
evtmsg->datalen);
brcmf_err("PSM's watchdog has fired!\n");
err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
evtmsg->datalen);
if (err)
brcmf_err("Failed to get memory dump, %d\n", err);
return err;
}
void brcmf_debugfs_init(void)
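One subtlety behind the reworked error paths: dev_coredumpv() takes ownership of the vmalloc'ed buffer, so the function must vfree() only on the failure paths that run before that call, which is exactly what the new brcmf_bus_get_memdump() error branch does. A minimal sketch of the ownership rule:

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int memdump_sketch(struct device *dev, const void *hdr, size_t len,
			  size_t ramsize)
{
	void *dump = vzalloc(len + ramsize);

	if (!dump)
		return -ENOMEM;

	memcpy(dump, hdr, len);
	/* On any failure from here on: vfree(dump) and return the error. */

	dev_coredumpv(dev, dump, len + ramsize, GFP_KERNEL); /* owns dump */
	return 0;
}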

View file

@ -45,26 +45,18 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Macro for error messages. net_ratelimit() is used when driver
* debugging is not selected. When debugging the driver error
* messages are as important as other tracing or even more so.
*/
#ifndef CONFIG_BRCM_TRACING
#ifdef CONFIG_BRCMDBG
#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
#else
#define brcmf_err(fmt, ...) \
do { \
if (net_ratelimit()) \
pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#endif
#else
__printf(2, 3)
void __brcmf_err(const char *func, const char *fmt, ...);
#define brcmf_err(fmt, ...) \
__brcmf_err(__func__, fmt, ##__VA_ARGS__)
#endif
/* Macro for error messages. When debugging / tracing the driver all error
* messages are important to us.
*/
#define brcmf_err(fmt, ...) \
do { \
if (IS_ENABLED(CONFIG_BRCMDBG) || \
IS_ENABLED(CONFIG_BRCM_TRACING) || \
net_ratelimit()) \
__brcmf_err(__func__, fmt, ##__VA_ARGS__); \
} while (0)
#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
__printf(3, 4)
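The rewritten macro collapses three build permutations into one definition by moving the configuration test into ordinary C. IS_ENABLED(CONFIG_FOO) expands to 1 when the option is y or m and to 0 otherwise, at compile time, so in debug and tracing builds the condition folds to true and no net_ratelimit() call is emitted. Schematically:

#include <linux/kconfig.h>
#include <linux/net.h>		/* net_ratelimit() */
#include <linux/printk.h>

static void log_error_sketch(const char *msg)
{
	/* Constant-folded branch: always logs when BRCMDBG is built in,
	 * rate-limited otherwise.
	 */
	if (IS_ENABLED(CONFIG_BRCMDBG) || net_ratelimit())
		pr_err("%s\n", msg);
}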

View file

@ -90,13 +90,16 @@ config IWLWIFI_BCAST_FILTERING
config IWLWIFI_PCIE_RTPM
bool "Enable runtime power management mode for PCIe devices"
depends on IWLMVM && PM
depends on IWLMVM && PM && EXPERT
default false
help
Say Y here to enable runtime power management for PCIe
devices. If enabled, the device will go into low power mode
when idle for a short period of time, allowing for improved
power saving during runtime.
power saving during runtime. Note that this feature requires
a tight integration with the platform. It is not recommended
to enable this feature without proper validation with the
specific target platform.
If unsure, say N.

View file

@ -740,7 +740,10 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
/* Find the previous rate that is in the rate mask */
i = index - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
if (i >= 0)
mask = BIT(i);
for (; i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
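The point of this fix: when index is 0, i starts at -1 and the old initializer mask = (1 << i) shifted by a negative count, undefined behavior in C even though the loop body would never run. The guarded form only materializes BIT(i) for a valid index. Reduced to a self-contained helper:

#include <linux/bitops.h>
#include <linux/types.h>

/* Returns the highest rate below @index present in @rate_mask, or -1. */
static int find_prev_rate(u16 rate_mask, u8 index)
{
	int i = index - 1;
	u16 mask = 0;

	if (i >= 0)			/* avoids the undefined 1 << -1 */
		mask = BIT(i);

	for (; i >= 0; i--, mask >>= 1)
		if (rate_mask & mask)
			return i;

	return -1;
}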

View file

@ -72,9 +72,13 @@
#define IWL_A000_SMEM_OFFSET 0x400000
#define IWL_A000_SMEM_LEN 0x68000
#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_MODULE_FIRMWARE(api) \
IWL_A000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
@ -116,11 +120,12 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.use_tfh = true
.use_tfh = true, \
.rf_id = true
const struct iwl_cfg iwla000_2ac_cfg = {
const struct iwl_cfg iwla000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_FW_PRE,
.fw_name_pre = IWL_A000_HR_FW_PRE,
IWL_DEVICE_A000,
.ht_params = &iwl_a000_ht_params,
.nvm_ver = IWL_A000_NVM_VERSION,
@ -128,4 +133,15 @@ const struct iwl_cfg iwla000_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
MODULE_FIRMWARE(IWL_A000_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
const struct iwl_cfg iwla000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_JF_FW_PRE,
IWL_DEVICE_A000,
.ht_params = &iwl_a000_ht_params,
.nvm_ver = IWL_A000_NVM_VERSION,
.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
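With .rf_id now set, a single PCI ID can no longer identify the radio, so the probe path is expected to consult the RF ID register to choose between the HR and JF variants. That selection logic is not part of this diff; a hypothetical sketch of the idea, where the type-field mask and the JF fallback are assumptions:

/* Hypothetical, not code from this series. */
static const struct iwl_cfg *pick_a000_cfg(struct iwl_trans *trans)
{
	u32 rf_id = iwl_read32(trans, CSR_HW_RF_ID);

	if ((rf_id & 0x00fff000) == (CSR_HW_RF_ID_TYPE_HR & 0x00fff000))
		return &iwla000_2ac_cfg_hr;

	return &iwla000_2ac_cfg_jf;
}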

View file

@ -455,7 +455,8 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */

View file

@ -349,6 +349,7 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)

View file

@ -102,7 +102,6 @@ static struct dentry *iwl_dbgfs_root;
* @op_mode: the running op_mode
* @trans: transport layer
* @dev: for debug prints only
* @cfg: configuration struct
* @fw_index: firmware revision to try loading
* @firmware_name: composite filename of ucode file to load
* @request_firmware_complete: the firmware has been obtained from user space
@ -114,7 +113,6 @@ struct iwl_drv {
struct iwl_op_mode *op_mode;
struct iwl_trans *trans;
struct device *dev;
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
char firmware_name[64]; /* name of firmware file to load */
@ -213,18 +211,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const char *name_pre = drv->cfg->fw_name_pre;
const char *name_pre = drv->trans->cfg->fw_name_pre;
char tag[8];
if (first) {
drv->fw_index = drv->cfg->ucode_api_max;
drv->fw_index = drv->trans->cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
drv->fw_index--;
sprintf(tag, "%d", drv->fw_index);
}
if (drv->fw_index < drv->cfg->ucode_api_min) {
if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
return -ENOENT;
}
@ -1207,7 +1205,7 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
dbgfs_dir = drv->dbgfs_op_mode;
#endif
op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (!op_mode) {
@ -1247,8 +1245,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
const unsigned int api_min = drv->cfg->ucode_api_min;
const unsigned int api_max = drv->trans->cfg->ucode_api_max;
const unsigned int api_min = drv->trans->cfg->ucode_api_min;
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
@ -1310,7 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg))
if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
drv->trans->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@ -1408,14 +1407,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
drv->cfg->base_params->max_event_log_size;
drv->trans->cfg->base_params->max_event_log_size;
fw->init_errlog_ptr = pieces->init_errlog_ptr;
fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
if (pieces->inst_evtlog_size)
fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
drv->cfg->base_params->max_event_log_size;
drv->trans->cfg->base_params->max_event_log_size;
fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
@ -1504,8 +1503,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
kfree(pieces);
}
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg)
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
{
struct iwl_drv *drv;
int ret;
@ -1518,7 +1516,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
drv->trans = trans;
drv->dev = trans->dev;
drv->cfg = cfg;
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);

View file

@ -118,15 +118,13 @@ struct iwl_cfg;
* iwl_drv_start - start the drv
*
* @trans_ops: the ops of the transport
* @cfg: device specific constants / virtual functions
*
* starts the driver: fetches the firmware. This should be called by bus
* specific system flows implementations. For example, the bus specific probe
* function should do bus related operations only, and then call to this
* function. It returns the driver object or %NULL if an error occurred.
*/
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg);
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans);
/**
* iwl_drv_stop - stop the drv

View file

@ -91,7 +91,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
mvmvif->rekey_data.replay_ctr =
cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
mutex_unlock(&mvm->mutex);
@ -1741,7 +1741,7 @@ out:
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
u32 base = mvm->error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;

View file

@ -64,13 +64,14 @@
#define __fw_api_mac_h__
/*
* The first MAC indices (starting from 0)
* are available to the driver, AUX follows
* The first MAC indices (starting from 0) are available to the driver,
* AUX indices follows - 1 for non-CDB, 2 for CDB.
*/
#define MAC_INDEX_AUX 4
#define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_TDLS_STA_COUNT 4

View file

@ -453,6 +453,8 @@ enum scan_config_flags {
SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22),
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23),
/* Bits 26-31 are for num of channels in channel_array */
#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
@ -485,6 +487,20 @@ enum iwl_channel_flags {
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
};
/**
* struct iwl_scan_dwell
* @active: default dwell time for active scan
* @passive: default dwell time for passive scan
* @fragmented: default dwell time for fragmented scan
* @extended: default dwell time for channels 1, 6 and 11
*/
struct iwl_scan_dwell {
u8 active;
u8 passive;
u8 fragmented;
u8 extended;
} __packed;
/**
* struct iwl_scan_config
* @flags: enum scan_config_flags
@ -493,10 +509,7 @@ enum iwl_channel_flags {
* @legacy_rates: default legacy rates - enum scan_config_rates
* @out_of_channel_time: default max out of serving channel time
* @suspend_time: default max suspend time
* @dwell_active: default dwell time for active scan
* @dwell_passive: default dwell time for passive scan
* @dwell_fragmented: default dwell time for fragmented scan
* @dwell_extended: default dwell time for channels 1, 6 and 11
* @dwell: dwells for the scan
* @mac_addr: default mac address to be used in probes
* @bcast_sta_id: the index of the station in the fw
* @channel_flags: default channel flags - enum iwl_channel_flags
@ -510,16 +523,29 @@ struct iwl_scan_config {
__le32 legacy_rates;
__le32 out_of_channel_time;
__le32 suspend_time;
u8 dwell_active;
u8 dwell_passive;
u8 dwell_fragmented;
u8 dwell_extended;
struct iwl_scan_dwell dwell;
u8 mac_addr[ETH_ALEN];
u8 bcast_sta_id;
u8 channel_flags;
u8 channel_array[];
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
struct iwl_scan_config_cdb {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
__le32 legacy_rates;
__le32 out_of_channel_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
struct iwl_scan_dwell dwell;
u8 mac_addr[ETH_ALEN];
u8 bcast_sta_id;
u8 channel_flags;
u8 channel_array[];
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
/**
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
@ -540,17 +566,18 @@ enum iwl_umac_scan_uid_offsets {
};
enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
};
/**
@ -610,8 +637,9 @@ struct iwl_scan_req_umac_tail {
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
* @max_out_time: max out of serving channel time
* @suspend_time: max suspend time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
* @scan_priority: scan internal prioritization &enum iwl_scan_priority
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
@ -631,15 +659,33 @@ struct iwl_scan_req_umac {
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
u8 data[];
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
union {
struct {
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
u8 data[];
} no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
u8 data[];
} cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
};
} __packed;
#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
2 * sizeof(__le32))
/**
* struct iwl_umac_scan_abort
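The IWL_SCAN_REQ_UMAC_SIZE arithmetic follows from the union above: the flexible data[] arrays contribute no size, and the cdb branch is larger than no_cdb by exactly one extra __le32 in max_out_time[] plus one in suspend_time[], so subtracting 2 * sizeof(__le32) from the union size recovers the non-CDB command length. A standalone illustration with stand-in types:

#include <assert.h>
#include <stdint.h>

struct no_cdb { uint32_t max_out_time;    uint32_t suspend_time;    };
struct cdb    { uint32_t max_out_time[2]; uint32_t suspend_time[2]; };

int main(void)
{
	/* The 8 bytes that IWL_SCAN_REQ_UMAC_SIZE subtracts back out. */
	assert(sizeof(struct cdb) - sizeof(struct no_cdb) ==
	       2 * sizeof(uint32_t));
	return 0;
}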

View file

@ -220,7 +220,7 @@ struct mvm_statistics_bt_activity {
__le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
struct mvm_statistics_general_v8 {
struct mvm_statistics_general_common {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
@ -248,11 +248,22 @@ struct mvm_statistics_general_v8 {
__le64 on_time_rf;
__le64 on_time_scan;
__le64 tx_time;
} __packed;
struct mvm_statistics_general_v8 {
struct mvm_statistics_general_common common;
__le32 beacon_counter[NUM_MAC_INDEX];
u8 beacon_average_energy[NUM_MAC_INDEX];
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
struct mvm_statistics_general_cdb {
struct mvm_statistics_general_common common;
__le32 beacon_counter[NUM_MAC_INDEX_CDB];
u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
/**
* struct mvm_statistics_load - RX statistics for multi-queue devices
* @air_time: accumulated air time, per mac
@ -267,6 +278,13 @@ struct mvm_statistics_load {
u8 avg_energy[IWL_MVM_STATION_COUNT];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
struct mvm_statistics_load_cdb {
__le32 air_time[NUM_MAC_INDEX_CDB];
__le32 byte_count[NUM_MAC_INDEX_CDB];
__le32 pkt_count[NUM_MAC_INDEX_CDB];
u8 avg_energy[IWL_MVM_STATION_COUNT];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_2 */
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
@ -281,6 +299,7 @@ struct mvm_statistics_rx {
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* STATISTICS_CMD (0x9c), below.
*/
struct iwl_notif_statistics_v10 {
__le32 flag;
struct mvm_statistics_rx rx;
@ -296,6 +315,14 @@ struct iwl_notif_statistics_v11 {
struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
struct iwl_notif_statistics_cdb {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general_cdb general;
struct mvm_statistics_load_cdb load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_12 */
#define IWL_STATISTICS_FLG_CLEAR 0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2

View file

@ -672,8 +672,7 @@ struct iwl_mac_beacon_cmd_v6 {
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
/**
* struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
* @tx: the tx commands associated with the beacon frame
* struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA
* @template_id: currently equal to the mac context id of the coresponding
* mac.
* @tim_idx: the offset of the tim IE in the beacon
@ -682,16 +681,38 @@ struct iwl_mac_beacon_cmd_v6 {
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd {
struct iwl_tx_cmd tx;
struct iwl_mac_beacon_cmd_data {
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
};
/**
* struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
* @tx: the tx commands associated with the beacon frame
* @data: see &iwl_mac_beacon_cmd_data
*/
struct iwl_mac_beacon_cmd_v7 {
struct iwl_tx_cmd tx;
struct iwl_mac_beacon_cmd_data data;
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
/**
* struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
* @byte_cnt: byte count of the beacon frame
* @flags: for future use
* @data: see &iwl_mac_beacon_cmd_data
*/
struct iwl_mac_beacon_cmd {
__le16 byte_cnt;
__le16 flags;
__le64 reserved;
struct iwl_mac_beacon_cmd_data data;
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
__le64 tsf;

View file

@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids {
STORED_BEACON_NTF = 0xFF,
};
enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
};
enum iwl_fmac_debug_cmds {
LMAC_RD_WR = 0x0,
UMAC_RD_WR = 0x1,
@ -355,6 +359,7 @@ enum {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
};
@ -593,60 +598,7 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
struct mvm_alive_resp_ver1 {
__le16 status;
__le16 flags;
u8 ucode_minor;
u8 ucode_major;
__le16 id;
u8 api_minor;
u8 api_major;
u8 ver_subtype;
u8 ver_type;
u8 mac;
u8 opt;
__le16 reserved2;
__le32 timestamp;
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 log_event_table_ptr; /* SRAM address for event log */
__le32 cpu_register_ptr;
__le32 dbgm_config_ptr;
__le32 alive_counter_ptr;
__le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
struct mvm_alive_resp_ver2 {
__le16 status;
__le16 flags;
u8 ucode_minor;
u8 ucode_major;
__le16 id;
u8 api_minor;
u8 api_major;
u8 ver_subtype;
u8 ver_type;
u8 mac;
u8 opt;
__le16 reserved2;
__le32 timestamp;
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 log_event_table_ptr; /* SRAM address for LMAC event log */
__le32 cpu_register_ptr;
__le32 dbgm_config_ptr;
__le32 alive_counter_ptr;
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
u8 umac_minor; /* UMAC version: minor */
u8 umac_major; /* UMAC version: major */
__le16 umac_id; /* UMAC version: id */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_2 */
struct mvm_alive_resp {
__le16 status;
__le16 flags;
struct iwl_lmac_alive {
__le32 ucode_minor;
__le32 ucode_major;
u8 ver_subtype;
@ -662,12 +614,29 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
struct iwl_umac_alive {
__le32 umac_minor; /* UMAC version: minor */
__le32 umac_major; /* UMAC version: major */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
struct mvm_alive_resp_v3 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data;
struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_3 */
struct mvm_alive_resp {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data[2];
struct iwl_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_4 */
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@ -708,7 +677,6 @@ struct iwl_error_resp {
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
#define AUX_BINDING_INDEX (3)
#define MAX_PHYS (4)
/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)
@ -1251,13 +1219,16 @@ struct iwl_missed_beacons_notif {
* @external_ver: external image version
* @status: MFUART loading status
* @duration: MFUART loading time
* @image_size: MFUART image size in bytes
*/
struct iwl_mfuart_load_notif {
__le32 installed_ver;
__le32 external_ver;
__le32 status;
__le32 duration;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
/* image size valid only in v2 of the command */
__le32 image_size;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
@ -2200,4 +2171,11 @@ struct iwl_dbg_mem_access_rsp {
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
*/
struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
#endif /* __fw_api_h__ */

View file

@ -811,12 +811,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
mvm->fw_paging_db[i].fw_paging_block;
dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
dump_data->len = cpu_to_le32(sizeof(*paging) +
PAGING_BLOCK_SIZE);
paging = (void *)dump_data->data;
paging->index = cpu_to_le32(i);
dma_sync_single_for_cpu(mvm->trans->dev, addr,
PAGING_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
memcpy(paging->data, page_address(pages),
PAGING_BLOCK_SIZE);
dump_data = iwl_fw_error_next_data(dump_data);

View file

@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
mvm->fw_paging_db[0].fw_paging_size);
dma_sync_single_for_device(mvm->trans->dev,
mvm->fw_paging_db[0].fw_paging_phys,
mvm->fw_paging_db[0].fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d CSS bytes to first block\n",
@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* loop stop at num_of_paging_blk since that last block is not full.
*/
for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
mvm->fw_paging_db[idx].fw_paging_size);
block->fw_paging_size);
dma_sync_single_for_device(mvm->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d paging bytes to block %d\n",
@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
/* copy the last paging block */
if (mvm->num_of_pages_in_last_blk > 0) {
memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
dma_sync_single_for_device(mvm->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d pages in the last block %d\n",
@ -444,81 +461,61 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp_ver1 *palive1;
struct mvm_alive_resp_ver2 *palive2;
struct mvm_alive_resp_v3 *palive3;
struct mvm_alive_resp *palive;
struct iwl_umac_alive *umac;
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
palive1 = (void *)pkt->data;
mvm->support_umac_log = false;
mvm->error_event_table =
le32_to_cpu(palive1->error_event_table_ptr);
mvm->log_event_table =
le32_to_cpu(palive1->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
alive_data->valid = le16_to_cpu(palive1->status) ==
IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
"Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive1->status), palive1->ver_type,
palive1->ver_subtype, palive1->flags);
} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
palive2 = (void *)pkt->data;
mvm->error_event_table =
le32_to_cpu(palive2->error_event_table_ptr);
mvm->log_event_table =
le32_to_cpu(palive2->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
mvm->umac_error_event_table =
le32_to_cpu(palive2->error_info_addr);
mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
alive_data->valid = le16_to_cpu(palive2->status) ==
IWL_ALIVE_STATUS_OK;
if (mvm->umac_error_event_table)
mvm->support_umac_log = true;
IWL_DEBUG_FW(mvm,
"Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive2->status), palive2->ver_type,
palive2->ver_subtype, palive2->flags);
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
palive2->umac_major, palive2->umac_minor);
} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
mvm->error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
mvm->log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
mvm->umac_error_event_table =
le32_to_cpu(palive->error_info_addr);
mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
alive_data->valid = le16_to_cpu(palive->status) ==
IWL_ALIVE_STATUS_OK;
if (mvm->umac_error_event_table)
mvm->support_umac_log = true;
IWL_DEBUG_FW(mvm,
"Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive->status), palive->ver_type,
palive->ver_subtype, palive->flags);
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
le32_to_cpu(palive->umac_major),
le32_to_cpu(palive->umac_minor));
umac = &palive->umac_data;
lmac1 = &palive->lmac_data[0];
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
} else {
palive3 = (void *)pkt->data;
umac = &palive3->umac_data;
lmac1 = &palive3->lmac_data;
status = le16_to_cpu(palive3->status);
}
mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
if (lmac2)
mvm->error_event_table[1] =
le32_to_cpu(lmac2->error_event_table_ptr);
mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
if (mvm->umac_error_event_table)
mvm->support_umac_log = true;
IWL_DEBUG_FW(mvm,
"Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
status, lmac1->ver_type, lmac1->ver_subtype);
if (lmac2)
IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
le32_to_cpu(umac->umac_major),
le32_to_cpu(umac->umac_minor));
return true;
}
static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
return true;
}
@ -537,6 +534,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
int ret;
/*
* Configure and operate fw paging mechanism.
* The driver configures the paging flow only once.
* The CPU2 paging image is included in the IWL_UCODE_INIT image.
*/
if (!fw->paging_mem_size)
return 0;
/*
* When dma is not enabled, the driver needs to copy / write
* the downloaded / uploaded page to / from the smem.
* This gets the location of the place were the pages are
* stored.
*/
if (!is_device_dma_capable(mvm->trans->dev)) {
ret = iwl_trans_get_paging_item(mvm);
if (ret) {
IWL_ERR(mvm, "failed to get FW paging item\n");
return ret;
}
}
ret = iwl_save_fw_paging(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to save the FW paging image\n");
return ret;
}
ret = iwl_send_paging_cmd(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to send the paging cmd\n");
iwl_free_fw_paging(mvm);
return ret;
}
return 0;
}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@ -607,40 +646,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
* configure and operate fw paging mechanism.
* driver configures the paging flow only once, CPU2 paging image
* included in the IWL_UCODE_INIT image.
*/
if (fw->paging_mem_size) {
/*
* When dma is not enabled, the driver needs to copy / write
* the downloaded / uploaded page to / from the smem.
* This gets the location where the pages are stored.
*/
if (!is_device_dma_capable(mvm->trans->dev)) {
ret = iwl_trans_get_paging_item(mvm);
if (ret) {
IWL_ERR(mvm, "failed to get FW paging item\n");
return ret;
}
}
ret = iwl_save_fw_paging(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to save the FW paging image\n");
return ret;
}
ret = iwl_send_paging_cmd(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to send the paging cmd\n");
iwl_free_fw_paging(mvm);
return ret;
}
}
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
@ -798,6 +803,75 @@ out:
return ret;
}
int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait init_wait;
struct iwl_nvm_access_complete_cmd nvm_complete = {};
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
};
int ret;
lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait,
&init_wait,
init_complete,
ARRAY_SIZE(init_complete),
iwl_wait_init_complete,
NULL);
/* Will also start the device */
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
goto error;
}
/* TODO: remove when integrating context info */
ret = iwl_mvm_init_paging(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to init paging: %d\n",
ret);
goto error;
}
/* Read the NVM only at driver load time, no need to do this twice */
if (read_nvm) {
/* Read nvm */
ret = iwl_nvm_init(mvm, true);
if (ret) {
IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
goto error;
}
}
/* In case we read the NVM from external file, load it to the NIC */
if (mvm->nvm_file_name)
iwl_mvm_load_nvm_to_nic(mvm);
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
if (WARN_ON(ret))
goto error;
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
NVM_ACCESS_COMPLETE), 0,
sizeof(nvm_complete), &nvm_complete);
if (ret) {
IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
ret);
goto error;
}
/* We wait for the INIT complete notification */
return iwl_wait_notification(&mvm->notif_wait, &init_wait,
MVM_UCODE_ALIVE_TIMEOUT);
error:
iwl_remove_notification(&mvm->notif_wait, &init_wait);
return ret;
}
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@ -1058,6 +1132,43 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
return ret;
}
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
int ret;
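/*
 * FW with the new TX API uses a single unified image - there is no
 * separate INIT ucode to run, so take the unified flow instead of
 * the two-step INIT/RT flow below.
 */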
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_run_unified_mvm_ucode(mvm, false);
ret = iwl_run_init_mvm_ucode(mvm, false);
if (iwlmvm_mod_params.init_dbg)
return 0;
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
return ret;
}
/*
* Stop and start the transport without entering low power
* mode. This will save the state of other components on the
* device that are triggered by the INIT firmware (MFUART).
*/
_iwl_trans_stop_device(mvm->trans, false);
ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
return ret;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret)
return ret;
return iwl_mvm_init_paging(mvm);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@ -1070,35 +1181,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
return ret;
/*
* If we haven't completed the run of the init ucode during
* module loading, load init ucode now
* (for example, if we were in RFKILL)
*/
ret = iwl_run_init_mvm_ucode(mvm, false);
if (iwlmvm_mod_params.init_dbg)
return 0;
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
goto error;
}
/*
* Stop and start the transport without entering low power
* mode. This will save the state of other components on the
* device that are triggered by the INIT firmware (MFUART).
*/
_iwl_trans_stop_device(mvm->trans, false);
ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
goto error;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
goto error;
@ -1125,13 +1208,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Send phy db control command and then phy db calibration*/
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
if (!iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
goto error;
ret = iwl_send_phy_cfg_cmd(mvm);
if (ret)
goto error;
ret = iwl_send_phy_cfg_cmd(mvm);
if (ret)
goto error;
}
/* Init RSS configuration */
if (iwl_mvm_has_new_rx_api(mvm)) {
@ -1317,4 +1402,9 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
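/* Only read image_size when the payload is big enough to hold it */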
if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
IWL_DEBUG_INFO(mvm,
"MFUART: image size: 0x%08x\n",
le32_to_cpu(mfuart_notif->image_size));
}


@ -531,38 +531,26 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
/*
* If DQA is supported - queues were already disabled, since in
* DQA-mode the queues are a property of the STA and not of the
* vif, and at this point the STA was already deleted
*/
if (iwl_mvm_is_dqa_supported(mvm))
return;
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
if (!iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
else
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_P2P_DEVICE_QUEUE,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
/* fall through */
default:
/*
* If DQA is supported - queues were already disabled, since in
* DQA-mode the queues are a property of the STA and not of the
* vif, and at this point the STA was already deleted
*/
if (iwl_mvm_is_dqa_supported(mvm))
break;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@ -991,7 +979,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
}
static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
struct iwl_mac_beacon_cmd_v6 *beacon_cmd,
__le32 *tim_index, __le32 *tim_size,
u8 *beacon, u32 frame_size)
{
u32 tim_idx;
@ -1008,8 +996,8 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
/* If TIM field was found, set variables */
if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
*tim_index = cpu_to_le32(tim_idx);
*tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
} else {
IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
}
@ -1043,8 +1031,9 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
};
union {
struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
struct iwl_mac_beacon_cmd beacon_cmd;
struct iwl_mac_beacon_cmd_v7 beacon_cmd;
} u = {};
struct iwl_mac_beacon_cmd beacon_cmd;
struct ieee80211_tx_info *info;
u32 beacon_skb_len;
u32 rate, tx_flags;
@ -1054,6 +1043,46 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
beacon_skb_len = beacon->len;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
u32 csa_offset, ecsa_offset;
csa_offset = iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_CHANNEL_SWITCH,
beacon_skb_len);
ecsa_offset =
iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon_skb_len);
if (iwl_mvm_has_new_tx_api(mvm)) {
beacon_cmd.data.template_id =
cpu_to_le32((u32)mvmvif->id);
beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset);
beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len);
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm,
&beacon_cmd.data.tim_idx,
&beacon_cmd.data.tim_size,
beacon->data,
beacon_skb_len);
cmd.len[0] = sizeof(beacon_cmd);
cmd.data[0] = &beacon_cmd;
goto send;
} else {
u.beacon_cmd.data.ecsa_offset =
cpu_to_le32(ecsa_offset);
u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
cmd.len[0] = sizeof(u.beacon_cmd);
cmd.data[0] = &u;
}
} else {
cmd.len[0] = sizeof(u.beacon_cmd_v6);
cmd.data[0] = &u;
}
/* TODO: for now the beacon template id is set to be the mac context id.
* Might be better to handle it as another resource ... */
u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id);
@ -1092,29 +1121,13 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
/* Set up TX beacon command fields */
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6,
iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx,
&u.beacon_cmd_v6.tim_size,
beacon->data,
beacon_skb_len);
send:
/* Submit command */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
u.beacon_cmd.csa_offset =
cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_CHANNEL_SWITCH,
beacon_skb_len));
u.beacon_cmd.ecsa_offset =
cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon_skb_len));
cmd.len[0] = sizeof(u.beacon_cmd);
} else {
cmd.len[0] = sizeof(u.beacon_cmd_v6);
}
cmd.data[0] = &u;
cmd.dataflags[0] = 0;
cmd.len[1] = beacon_skb_len;
cmd.data[1] = beacon->data;


@ -1210,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
iwl_free_fw_paging(mvm);
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
@ -2010,16 +2008,16 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
}
if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
* We received a beacon from the associated AP so
* remove the session protection.
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
}
if (changes & BSS_CHANGED_BEACON_INFO) {
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
}
@ -2106,22 +2104,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;
/* enable the multicast queue, now that we have a station for it */
if (iwl_mvm_is_dqa_supported(mvm)) {
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvmvif->bcast_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, wdg_timeout);
}
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@ -2554,6 +2536,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int ret;
IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
@ -2582,8 +2565,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST &&
iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
@ -2594,6 +2575,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
}
mutex_lock(&mvm->mutex);
/* track whether or not the station is associated */
mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
/*
@ -2643,11 +2627,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
ret = iwl_mvm_update_sta(mvm, vif, sta);
if (ret == 0)
iwl_mvm_rs_rate_init(mvm, sta,
mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {


@ -739,8 +739,9 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
bool hw_registered;
bool calibrating;
u32 error_event_table;
u32 error_event_table[2];
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
@ -1217,6 +1218,19 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
return mvm->trans->cfg->use_tfh;
}
static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
/*
* TODO:
* How to determine CDB support is still not well defined - it may
* apply to all future HW devices, be decided per FW compilation,
* or differ between devices.
* For now, piggyback on the new TX API and revisit this once it
* is well defined.
*/
return iwl_mvm_has_new_tx_api(mvm);
}
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{
#ifdef CONFIG_THERMAL
@ -1257,6 +1271,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@ -1686,6 +1701,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
iwl_trans_stop_device(mvm->trans);
}


@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
HCMD_NAME(STORED_BEACON_NTF),
};
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(NVM_ACCESS_COMPLETE),
};
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};
/* this forward declaration can avoid to export the function */
@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
mvm->sf_state = SF_UNINIT;
mvm->cur_ucode = IWL_UCODE_INIT;
if (iwl_mvm_has_new_tx_api(mvm))
mvm->cur_ucode = IWL_UCODE_REGULAR;
else
mvm->cur_ucode = IWL_UCODE_INIT;
mvm->drop_bcn_ap_mode = true;
mutex_init(&mvm->mutex);
@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
if (iwl_mvm_has_new_tx_api(mvm))
err = iwl_run_unified_mvm_ucode(mvm, true);
else
err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
err = iwl_mvm_mac_setup_register(mvm);
if (err)
goto out_free;
mvm->hw_registered = true;
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_thermal_initialize(mvm, min_backoff);
@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_unregister:
ieee80211_unregister_hw(mvm->hw);
mvm->hw_registered = false;
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
reprobe->dev = mvm->trans->dev;
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
mvm->hw_registered) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);


@ -174,6 +174,14 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac;
bool tid_found = false;
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* set advanced pm flag with no uapsd ACs to enable ps-poll */
if (mvmvif->dbgfs_pm.use_ps_poll) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
return;
}
#endif
for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
if (!mvmvif->queue_params[ac].uapsd)
continue;
@ -204,16 +212,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
}
}
if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* set advanced pm flag with no uapsd ACs to enable ps-poll */
if (mvmvif->dbgfs_pm.use_ps_poll)
cmd->flags |=
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
#endif
return;
}
cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
@ -601,9 +599,8 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool *disable_ps = _data;
if (mvmvif->phy_ctxt)
if (mvmvif->phy_ctxt->id < MAX_PHYS)
*disable_ps |= mvmvif->ps_disabled;
if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX)
*disable_ps |= mvmvif->ps_disabled;
}
static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
@ -611,6 +608,7 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_power_vifs *power_iterator = _data;
bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
@ -621,34 +619,30 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
/* only a single MAC of the same type */
WARN_ON(power_iterator->ap_vif);
power_iterator->ap_vif = vif;
if (mvmvif->phy_ctxt)
if (mvmvif->phy_ctxt->id < MAX_PHYS)
power_iterator->ap_active = true;
if (active)
power_iterator->ap_active = true;
break;
case NL80211_IFTYPE_MONITOR:
/* only a single MAC of the same type */
WARN_ON(power_iterator->monitor_vif);
power_iterator->monitor_vif = vif;
if (mvmvif->phy_ctxt)
if (mvmvif->phy_ctxt->id < MAX_PHYS)
power_iterator->monitor_active = true;
if (active)
power_iterator->monitor_active = true;
break;
case NL80211_IFTYPE_P2P_CLIENT:
/* only a single MAC of the same type */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
if (mvmvif->phy_ctxt)
if (mvmvif->phy_ctxt->id < MAX_PHYS)
power_iterator->p2p_active = true;
if (active)
power_iterator->p2p_active = true;
break;
case NL80211_IFTYPE_STATION:
power_iterator->bss_vif = vif;
if (mvmvif->phy_ctxt)
if (mvmvif->phy_ctxt->id < MAX_PHYS)
power_iterator->bss_active = true;
if (active)
power_iterator->bss_active = true;
break;
default:


@ -972,7 +972,9 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
/* Find the previous rate that is in the rate mask */
i = index - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
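/* BIT(i) with a negative i is undefined, so only build the initial
 * mask when a previous rate index actually exists.
 */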
if (i >= 0)
mask = BIT(i);
for (; i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
@ -3616,6 +3618,8 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
} else if (rate & RATE_MCS_HT_MSK) {
type = "HT";
mcs = rate & RATE_HT_MCS_INDEX_MSK;
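/* HT rates encode the stream count in the MCS field; +1 turns
 * the 0-based NSS field into a 1-based stream count.
 */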
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}


@ -497,8 +497,7 @@ struct iwl_mvm_stat_data {
struct iwl_mvm *mvm;
__le32 mac_id;
u8 beacon_filter_average_energy;
struct mvm_statistics_general_v8 *general;
struct mvm_statistics_load *load;
void *general;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@ -518,10 +517,26 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* the notification directly.
*/
if (data->general) {
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
mvmvif->beacon_stats.avg_signal =
-data->general->beacon_average_energy[mvmvif->id];
u16 vif_id = mvmvif->id;
if (iwl_mvm_is_cdb_supported(mvm)) {
struct mvm_statistics_general_cdb *general =
data->general;
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(general->beacon_counter[vif_id]);
mvmvif->beacon_stats.avg_signal =
-general->beacon_average_energy[vif_id];
} else {
struct mvm_statistics_general_v8 *general =
data->general;
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(general->beacon_counter[vif_id]);
mvmvif->beacon_stats.avg_signal =
-general->beacon_average_energy[vif_id];
}
}
if (mvmvif->id != id)
@ -615,46 +630,65 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
sizeof(struct iwl_notif_statistics_v10);
int expected_size;
if (iwl_mvm_is_cdb_supported(mvm))
expected_size = sizeof(*stats);
else if (iwl_mvm_has_new_rx_api(mvm))
expected_size = sizeof(struct iwl_notif_statistics_v11);
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
if (iwl_rx_packet_payload_len(pkt) != expected_size)
goto invalid;
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
stats->general.beacon_filter_average_energy;
stats->general.common.beacon_filter_average_energy;
iwl_mvm_update_rx_statistics(mvm, &stats->rx);
mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
mvm->radio_stats.on_time_rf =
le64_to_cpu(stats->general.on_time_rf);
le64_to_cpu(stats->general.common.on_time_rf);
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.on_time_scan);
le64_to_cpu(stats->general.common.on_time_scan);
data.general = &stats->general;
if (iwl_mvm_has_new_rx_api(mvm)) {
int i;
u8 *energy;
__le32 *bytes, *air_time;
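/* The per-station load arrays live at different offsets in the
 * v11 and CDB layouts, so pick the pointers per layout.
 */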
data.load = &stats->load_stats;
if (!iwl_mvm_is_cdb_supported(mvm)) {
struct iwl_notif_statistics_v11 *v11 =
(void *)&pkt->data;
energy = (void *)&v11->load_stats.avg_energy;
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
air_time = (void *)&stats->load_stats.air_time;
}
rcu_read_lock();
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
struct iwl_mvm_sta *sta;
if (!data.load->avg_energy[i])
if (!energy[i])
continue;
sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
sta->avg_energy = data.load->avg_energy[i];
sta->avg_energy = energy[i];
}
rcu_read_unlock();
}


@ -149,8 +149,17 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
unsigned int headlen, fraglen, pad_len = 0;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
pad_len = 2;
/*
* If the device inserted padding it means that (it thought)
* the 802.11 header wasn't a multiple of 4 bytes long. In
* this case, reserve two bytes at the start of the SKB to
* align the payload properly in case we end up copying it.
*/
skb_reserve(skb, pad_len);
}
len -= pad_len;
/* If frame is small enough to fit in skb->head, pull it completely.
@ -409,7 +418,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
/* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
return;
goto set_timer;
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size;
@ -432,6 +441,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
}
reorder_buf->head_sn = nssn;
set_timer:
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;


@ -197,7 +197,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
int *global_cnt = data;
if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id < MAX_PHYS)
mvmvif->phy_ctxt->id < NUM_PHY_CTX)
*global_cnt += 1;
}
@ -943,18 +943,92 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
return cpu_to_le32(rates);
}
static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
struct iwl_scan_dwell *dwell,
struct iwl_mvm_scan_timing_params *timing)
{
dwell->active = timing->dwell_active;
dwell->passive = timing->dwell_passive;
dwell->fragmented = timing->dwell_fragmented;
dwell->extended = timing->dwell_extended;
}
static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
{
struct ieee80211_supported_band *band;
int i, j = 0;
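/* Flatten the supported channels into one hw_value array:
 * 2.4 GHz channels first, then 5 GHz.
 */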
band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
}
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
iwl_mvm_fill_channels(mvm, cfg->channel_array);
}
static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config_cdb *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->out_of_channel_time[1] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
iwl_mvm_fill_channels(mvm, cfg->channel_array);
}
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config *scan_config;
struct ieee80211_supported_band *band;
int num_channels =
mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
int ret, i, j = 0, cmd_size;
void *cfg;
int ret, cmd_size;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
};
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
int num_channels =
mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
u32 flags;
u8 channel_flags;
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
@ -965,52 +1039,45 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return 0;
}
cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
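/* The channel list (one byte per channel) follows the fixed part */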
if (iwl_mvm_is_cdb_supported(mvm))
cmd_size = sizeof(struct iwl_scan_config_cdb);
else
cmd_size = sizeof(struct iwl_scan_config);
cmd_size += mvm->fw->ucode_capa.n_scan_channels;
scan_config = kzalloc(cmd_size, GFP_KERNEL);
if (!scan_config)
cfg = kzalloc(cmd_size, GFP_KERNEL);
if (!cfg)
return -ENOMEM;
scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
SCAN_CONFIG_FLAG_SET_RX_CHAINS |
SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
SCAN_CONFIG_FLAG_SET_ALL_TIMES |
SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
SCAN_CONFIG_N_CHANNELS(num_channels) |
(type == IWL_SCAN_TYPE_FRAGMENTED ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED));
scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
scan_config->out_of_channel_time =
cpu_to_le32(scan_timing[type].max_out_time);
scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
scan_config->dwell_active = scan_timing[type].dwell_active;
scan_config->dwell_passive = scan_timing[type].dwell_passive;
scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented;
scan_config->dwell_extended = scan_timing[type].dwell_extended;
flags = SCAN_CONFIG_FLAG_ACTIVATE |
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
SCAN_CONFIG_FLAG_SET_RX_CHAINS |
SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
SCAN_CONFIG_FLAG_SET_ALL_TIMES |
SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
SCAN_CONFIG_N_CHANNELS(num_channels) |
(type == IWL_SCAN_TYPE_FRAGMENTED ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
channel_flags = IWL_CHANNEL_FLAG_EBS |
IWL_CHANNEL_FLAG_ACCURATE_EBS |
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
IWL_CHANNEL_FLAG_ACCURATE_EBS |
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
if (iwl_mvm_is_cdb_supported(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
}
band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].hw_value;
band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].hw_value;
cmd.data[0] = scan_config;
cmd.data[0] = cfg;
cmd.len[0] = cmd_size;
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
@ -1020,7 +1087,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (!ret)
mvm->scan_type = type;
kfree(scan_config);
kfree(cfg);
return ret;
}
@ -1039,19 +1106,31 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
if (params->measurement_dwell) {
cmd->active_dwell = params->measurement_dwell;
cmd->passive_dwell = params->measurement_dwell;
cmd->extended_dwell = params->measurement_dwell;
} else {
cmd->active_dwell = scan_timing[params->type].dwell_active;
cmd->passive_dwell = scan_timing[params->type].dwell_passive;
cmd->extended_dwell = scan_timing[params->type].dwell_extended;
cmd->active_dwell = timing->dwell_active;
cmd->passive_dwell = timing->dwell_passive;
cmd->extended_dwell = timing->dwell_extended;
}
cmd->fragmented_dwell = timing->dwell_fragmented;
if (iwl_mvm_is_cdb_supported(mvm)) {
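/* CDB: program the same timings for both LMACs (index 0 and 1) */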
cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
} else {
cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
cmd->no_cdb.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_is_regular_scan(params))
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@ -1063,9 +1142,8 @@ static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
int n_channels, u32 ssid_bitmap,
struct iwl_scan_req_umac *cmd)
struct iwl_scan_channel_cfg_umac *channel_cfg)
{
struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
int i;
for (i = 0; i < n_channels; i++) {
@ -1088,8 +1166,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
if (params->type == IWL_SCAN_TYPE_FRAGMENTED) {
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
}
if (iwl_mvm_rrm_scan_needed(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
@ -1126,11 +1207,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
(void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
int uid, i;
u32 ssid_bitmap = 0;
u8 channel_flags = 0;
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
@ -1157,16 +1241,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
if (iwl_mvm_scan_use_ebs(mvm, vif))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
cmd->n_channels = params->n_channels;
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->cdb.channel_flags = channel_flags;
cmd->cdb.n_channels = params->n_channels;
} else {
cmd->no_cdb.channel_flags = channel_flags;
cmd->no_cdb.n_channels = params->n_channels;
}
iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
params->n_channels, ssid_bitmap, cmd);
params->n_channels, ssid_bitmap,
cmd_data);
for (i = 0; i < params->n_scan_plans; i++) {
struct cfg80211_sched_scan_plan *scan_plan =
@ -1601,8 +1692,13 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE;
if (iwl_mvm_is_cdb_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return sizeof(struct iwl_scan_req_umac) +
return base_size +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail);


@ -202,7 +202,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
add_sta_cmd.station_flags |=
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
if (mvm_sta->associated)
add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
if (sta->wme) {
add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
@ -457,6 +458,52 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
return disable_agg_tids;
}
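/*
 * Free a queue that was left inactive: remove its STA/TID marking,
 * invalidate any aggregation TIDs still tied to it, disable the queue
 * itself and, if it used to belong to a different station, tell the
 * FW to remove the old mapping.
 */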
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
bool same_sta)
{
struct iwl_mvm_sta *mvmsta;
u8 txq_curr_ac, sta_id, tid;
unsigned long disable_agg_tids = 0;
int ret;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
/* Disable the queue */
if (disable_agg_tids)
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
ret = iwl_mvm_disable_txq(mvm, queue,
mvmsta->vif->hw_queue[txq_curr_ac],
tid, 0);
if (ret) {
/* Re-mark the inactive queue as inactive */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
return ret;
}
/* If TXQ is allocated to another STA, update removal in FW */
if (!same_sta)
iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
unsigned long tfd_queue_mask, u8 ac)
{
@ -645,7 +692,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
bool using_inactive_queue = false;
bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false;
@ -702,6 +749,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
using_inactive_queue = true;
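/* Note whether the queue already served this station - the FW
 * mapping only needs invalidating when the owner changes.
 */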
same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
IWL_DEBUG_TX_QUEUES(mvm,
"Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
queue, mvmsta->sta_id, tid);
@ -748,38 +796,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
* first
*/
if (using_inactive_queue) {
u8 txq_curr_ac, sta_id;
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
/* Disable the queue */
if (disable_agg_tids)
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
ret = iwl_mvm_disable_txq(mvm, queue,
mvmsta->vif->hw_queue[txq_curr_ac],
tid, 0);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
/* Re-mark the inactive queue as inactive */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
spin_unlock_bh(&mvm->queue_info_lock);
ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
if (ret)
return ret;
}
/* If TXQ is allocated to another STA, update removal in FW */
if (sta_id != mvmsta->sta_id)
iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
}
IWL_DEBUG_TX_QUEUES(mvm,
@ -1095,6 +1114,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
bool using_inactive_queue = false, same_sta = false;
/*
* Check for inactive queues, so we don't reach a situation where we
@ -1118,6 +1138,14 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "No available queues for new station\n");
return -ENOSPC;
} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
/*
* If this queue is already allocated but inactive, we need to
* free it before enabling it again; mark it as reserved so
* that no new traffic arrives on it in the meantime.
*/
using_inactive_queue = true;
same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
@ -1125,6 +1153,9 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
mvmsta->reserved_queue = queue;
if (using_inactive_queue)
iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
queue, mvmsta->sta_id);
@ -1471,6 +1502,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
u8 sta_id = mvm_sta->sta_id;
int ret;
lockdep_assert_held(&mvm->mutex);
@ -1479,7 +1511,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
kfree(mvm_sta->dup_data);
if ((vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) ||
mvmvif->ap_sta_id == sta_id) ||
iwl_mvm_is_dqa_supported(mvm)){
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
@ -1495,8 +1527,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
if (iwl_mvm_is_dqa_supported(mvm))
if (iwl_mvm_is_dqa_supported(mvm)) {
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
/*
* If pending_frames is set at this point - it must be a
* driver-internal logic error, since the queues are empty
* and were removed successfully.
* Warn on it, but set it to 0 anyway to avoid the station
* not being removed later in the function.
*/
WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
}
/* If there is a TXQ still marked as reserved - free it */
if (iwl_mvm_is_dqa_supported(mvm) &&
@ -1514,7 +1555,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
mvm_sta->sta_id, reserved_txq, *status)) {
sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -EINVAL;
}
@ -1524,7 +1565,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
}
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
mvmvif->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@ -1533,7 +1574,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
if (mvm->d0i3_ap_sta_id == sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
}
@ -1542,7 +1583,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* This shouldn't happen - the TDLS channel switch should be canceled
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
@ -1552,21 +1593,20 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* calls the drain worker.
*/
spin_lock_bh(&mvm_sta->lock);
/*
* There are frames pending on the AC queues for this station.
* We need to wait until all the frames are drained...
*/
if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
if (atomic_read(&mvm->pending_frames[sta_id])) {
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
ERR_PTR(-EBUSY));
spin_unlock_bh(&mvm_sta->lock);
/* disable TDLS sta queues on drain complete */
if (sta->tdls) {
mvm->tfd_drained[mvm_sta->sta_id] =
mvm_sta->tfd_queue_msk;
IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
mvm_sta->sta_id);
mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
}
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@ -1750,6 +1790,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
int ret;
lockdep_assert_held(&mvm->mutex);
@ -1765,19 +1806,16 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int queue;
if ((vif->type == NL80211_IFTYPE_AP) &&
(mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
if (vif->type == NL80211_IFTYPE_AP)
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
(mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
return -EINVAL;
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
wdg_timeout);
bsta->tfd_queue_msk |= BIT(queue);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
@ -1786,8 +1824,67 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
mvmvif->id, mvmvif->color);
ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
mvmvif->id, mvmvif->color);
if (ret)
return ret;
/*
* For an AP vif we also need to enable the cab_queue. However, we
* have to enable it after the ADD_STA command is sent, otherwise
* the FW will throw an assert once we send the ADD_STA command
* (it'll detect a mismatch in the tfd_queue_msk, as we can't add
* the enabled cab_queue to the mask).
*/
if (iwl_mvm_is_dqa_supported(mvm) &&
vif->type == NL80211_IFTYPE_AP) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvmvif->bcast_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
0, &cfg, wdg_timeout);
}
return 0;
}
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
if (mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
mvmvif->bcast_sta.tfd_queue_msk &=
~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
}
if (mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_P2P_DEVICE_QUEUE,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
mvmvif->bcast_sta.tfd_queue_msk &=
~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
}
}
/* Send the FW a request to remove the station from its internal data
@ -1799,6 +1896,9 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_free_bcast_sta_queues(mvm, vif);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@ -1812,22 +1912,16 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_is_dqa_supported(mvm))
if (!iwl_mvm_is_dqa_supported(mvm)) {
qmask = iwl_mvm_mac_get_queues_mask(vif);
if (vif->type == NL80211_IFTYPE_AP) {
/*
* The firmware defines the TFD queue mask to only be relevant
* for *unicast* queues, so the multicast (CAB) queue shouldn't
* be included.
* be included. This only happens in NL80211_IFTYPE_AP vif type,
* so the next line will only have an effect there.
*/
qmask &= ~BIT(vif->cab_queue);
if (iwl_mvm_is_dqa_supported(mvm))
qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
} else if (iwl_mvm_is_dqa_supported(mvm) &&
vif->type == NL80211_IFTYPE_P2P_DEVICE) {
qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
}
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
@ -2232,6 +2326,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
/*
* TXQ shouldn't be in inactive mode for non-DQA, so getting
* an inactive queue from iwl_mvm_find_free_queue() is
* certainly a bug
*/
WARN_ON(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_INACTIVE);
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
@ -2947,6 +3048,11 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
/* Get the station from the mvm local station table */
mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
if (!mvm_sta) {
IWL_ERR(mvm, "Failed to find station\n");
return -EINVAL;
}
sta_id = mvm_sta->sta_id;
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
@ -2974,8 +3080,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
return 0;
}
sta_id = mvm_sta->sta_id;
ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
if (ret)
return ret;


@ -437,6 +437,7 @@ struct iwl_mvm_sta {
bool disable_tx;
bool tlc_amsdu;
bool sleeping;
bool associated;
u8 agg_tids;
u8 sleep_tx_count;
u8 avg_energy;


@ -202,7 +202,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@ -284,9 +283,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
tx_cmd->tx_flags = cpu_to_le32(tx_flags);
/* Total # bytes to be transmitted */
tx_cmd->len = cpu_to_le16((u16)skb->len +
(uintptr_t)skb_info->driver_data[0]);
/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
tx_cmd->len = cpu_to_le16((u16)skb->len);
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
@ -466,7 +464,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
@ -486,12 +483,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
return dev_cmd;
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
struct iwl_device_cmd *cmd)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
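/* Reusing the cb area for the TX status wipes info->control, so
 * stash the device command pointer in driver_data[1] instead.
 */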
memset(&skb_info->status, 0, sizeof(skb_info->status));
memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
skb_info->driver_data[1] = dev_cmd;
return dev_cmd;
skb_info->driver_data[1] = cmd;
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
@ -503,15 +506,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
/*
* handle legacy hostapd as well, where station may be added
* only after assoc.
* Handle legacy hostapd as well, where station may be added
* only after assoc. Take care of the case where we send a
* deauth to a station that we don't have.
*/
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
ieee80211_is_deauth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ON_ONCE(1);
WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
@ -543,9 +548,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* queue. STATION (HS2.0) uses the auxiliary context of the FW,
* and hence needs to be sent on the aux queue
*/
if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
skb_info->control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
skb_info->hw_queue = mvm->aux_queue;
memcpy(&info, skb->cb, sizeof(info));
@ -557,9 +562,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.hw_queue != info.control.vif->cab_queue)))
return -1;
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)0;
queue = info.hw_queue;
/*
@ -570,9 +572,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* (this is not possible for unicast packets as TDLS discovery
* responses are sent without a station entry); otherwise use the
* AUX station.
* In DQA mode, if vif is of type STATION and frames are not multicast,
* they should be sent from the BSS queue. For example, TDLS setup
* frames should be sent on this queue, as they go through the AP.
* In DQA mode, if vif is of type STATION and frames are not multicast
* or offchannel, they should be sent from the BSS queue.
* For example, TDLS setup frames should be sent on this queue,
* as they go through the AP.
*/
sta_id = mvm->aux_sta.sta_id;
if (info.control.vif) {
@ -594,7 +597,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (ap_sta_id != IWL_MVM_STATION_COUNT)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION) {
info.control.vif->type == NL80211_IFTYPE_STATION &&
queue != mvm->aux_queue) {
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
}
}
@ -605,6 +609,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (!dev_cmd)
return -1;
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* Copy MAC header from skb into command buffer */
@ -641,7 +648,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
bool ipv4 = (skb->protocol == htons(ETH_P_IP));
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
u16 amsdu_add, snap_ip_tcp, pad, i = 0;
u16 snap_ip_tcp, pad, i = 0;
unsigned int dbg_max_amsdu_len;
netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 *qc, tid, txf;
@ -743,21 +750,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
/* This skb fits in one single A-MSDU */
if (num_subframes * mss >= tcp_payload_len) {
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
/*
* Compute the length of all the data added for the A-MSDU.
* This will be used to compute the length to write in the TX
* command. We have: SNAP + IP + TCP for n -1 subframes and
* ETH header for n subframes. Note that the original skb
* already had one set of SNAP / IP / TCP headers.
*/
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
amsdu_add = num_subframes * sizeof(struct ethhdr) +
(num_subframes - 1) * (snap_ip_tcp + pad);
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
__skb_queue_tail(mpdus_skb, skb);
return 0;
}
@ -796,14 +788,6 @@ segment:
ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
if (tcp_payload_len > mss) {
struct ieee80211_tx_info *skb_info =
IEEE80211_SKB_CB(tmp);
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
amsdu_add = num_subframes * sizeof(struct ethhdr) +
(num_subframes - 1) * (snap_ip_tcp + pad);
skb_info->driver_data[0] =
(void *)(uintptr_t)amsdu_add;
skb_shinfo(tmp)->gso_size = mss;
} else {
qc = ieee80211_get_qos_ctl((void *)tmp->data);
@ -915,7 +899,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
goto drop;
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* From now on, we cannot access info->control */
/*
* we handle that entirely ourselves -- for uAPSD the firmware
@ -926,6 +909,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_lock(&mvmsta->lock);
/* Nullfunc frames should go to the MGMT queue regardless of QoS;
* the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
* MGMT TID assignment.
*/
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
u8 *qc = NULL;
qc = ieee80211_get_qos_ctl(hdr);
@ -938,27 +925,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
(ieee80211_is_qos_nullfunc(fc) ||
ieee80211_is_nullfunc(fc))) {
/*
* nullfunc frames should go to the MGMT queue regardless of QOS
*/
tid = IWL_MAX_TID_COUNT;
if (WARN_ON_ONCE(is_ampdu &&
mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
}
if (iwl_mvm_is_dqa_supported(mvm)) {
if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;
if (ieee80211_is_mgmt(fc))
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
}
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@ -966,11 +939,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
if (is_ampdu) {
if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
txq_id = mvmsta->tid_data[tid].txq_id;
}
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
@ -1022,6 +994,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
@ -1031,7 +1006,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
/* Increase pending frames count if this isn't AMPDU */
if (!is_ampdu)
if ((iwl_mvm_is_dqa_supported(mvm) &&
mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
(!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@ -1047,7 +1025,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
@ -1061,9 +1038,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)0;
if (!skb_is_gso(skb))
return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
@ -1302,8 +1276,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
memset(&info->status, 0, sizeof(info->status));
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
@ -1326,10 +1298,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
/* Single frame failure in an AMPDU queue => send BAR */
if (txq_id >= mvm->first_agg_queue &&
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
!(info->flags & IEEE80211_TX_STAT_ACK) &&
!(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
/* W/A FW bug: seq_ctl is wrong when the status isn't success */
if (status != TX_STATUS_SUCCESS) {
@ -1364,7 +1337,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status(mvm->hw, skb);
}
if (txq_id >= mvm->first_agg_queue) {
if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which


@ -497,13 +497,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
u32 base;
base = mvm->error_event_table;
if (mvm->cur_ucode == IWL_UCODE_INIT) {
if (!base)
base = mvm->fw->init_errlog_ptr;
@ -574,6 +572,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
if (mvm->error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
if (mvm->support_umac_log)
iwl_mvm_dump_umac_error_log(mvm);
@ -649,8 +655,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
/* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
cfg->tid);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
queue, cfg->tid);
return;
}


@ -533,7 +533,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
#endif /* CONFIG_IWLMVM */
{0}
@ -673,11 +673,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg = &iwl9000lc_2ac_cfg;
iwl_trans->cfg = cfg;
}
if (cfg == &iwla000_2ac_cfg_hr &&
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
cfg = &iwla000_2ac_cfg_jf;
iwl_trans->cfg = cfg;
}
}
#endif
pci_set_drvdata(pdev, iwl_trans);
iwl_trans->drv = iwl_drv_start(iwl_trans, cfg);
iwl_trans->drv = iwl_drv_start(iwl_trans);
if (IS_ERR(iwl_trans->drv)) {
ret = PTR_ERR(iwl_trans->drv);
@ -778,13 +784,14 @@ static int iwl_pci_resume(struct device *device)
/*
* Enable rfkill interrupt (in order to keep track of
* the rfkill status)
* the rfkill status). Must be locked to avoid processing
* a possible rfkill interrupt between reading the state
* and calling iwl_trans_pcie_rf_kill() with it.
*/
mutex_lock(&trans_pcie->mutex);
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
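
The reordered locking here closes a small race: the rfkill state must be sampled and reported under the same mutex, otherwise an rfkill interrupt can change the state between the read and the iwl_trans_pcie_rf_kill() call. A tiny user-space model of the lock-then-sample pattern (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int rfkill_state;

static void report(int state)
{
	printf("reporting rfkill=%d\n", state);
}

static void resume_path(void)
{
	pthread_mutex_lock(&lock);	/* lock first ... */
	int state = rfkill_state;	/* ... then sample ... */
	report(state);			/* ... and report atomically */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	resume_path();
	return 0;
}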


@ -279,7 +279,7 @@ struct iwl_txq {
bool frozen;
u8 active;
bool ampdu;
bool block;
int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
@ -670,6 +670,8 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);
return !(iwl_read32(trans, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}


@ -1607,17 +1607,19 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_RF_KILL) {
bool hw_rfkill;
mutex_lock(&trans_pcie->mutex);
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,
@ -1952,17 +1954,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
bool hw_rfkill;
mutex_lock(&trans_pcie->mutex);
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,


@ -1076,6 +1076,123 @@ static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
return hw_rfkill;
}
struct iwl_causes_list {
u32 cause_num;
u32 mask_reg;
u8 addr;
};
static struct iwl_causes_list causes_list[] = {
{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
{MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i;
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
iwl_clear_bit(trans, causes_list[i].mask_reg,
causes_list[i].cause_num);
}
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 offset =
trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
u32 val, idx;
/*
* The first RX queue - fallback queue, which is designated for
* management frame, command responses etc, is always mapped to the
* first interrupt vector. The other RX queues are mapped to
* the other (N - 2) interrupt vectors.
*/
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
for (idx = 1; idx < trans->num_rx_queues; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
}
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
val = MSIX_FH_INT_CAUSES_Q(0);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
val |= MSIX_NON_AUTO_CLEAR_CAUSE;
iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
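
As the comment in iwl_pcie_conf_msix_hw() below spells out, each cause occupies one byte of the IVAR table: the low nibble binds the cause to an MSI-X vector, and a bit in the high nibble disables auto-clear when that vector also serves other causes. A sketch of the encoding (the flag's exact bit position is an assumption, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define NON_AUTO_CLEAR 0x80	/* assumed position of the flag bit */

static uint8_t ivar_entry(uint8_t vector, int shared)
{
	return (vector & 0x0f) | (shared ? NON_AUTO_CLEAR : 0);
}

int main(void)
{
	/* default vector 0, shared with the FBQ causes: prints 0x80 */
	printf("0x%02x\n", ivar_entry(0, 1));
	/* RX queue 3 on a dedicated vector: prints 0x03 */
	printf("0x%02x\n", ivar_entry(3, 0));
	return 0;
}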
static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
return;
}
/*
* The IVAR table needs to be configured again after reset,
* but if the device is disabled, we can't write to
* prph.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
* Each cause from the causes list above and the RX causes is
* represented as a byte in the IVAR table. The first nibble
* represents the bound interrupt vector of the cause, the second
* represents no auto clear for this cause. This will be set if its
* interrupt vector is bound to serve other causes.
*/
iwl_pcie_map_rx_causes(trans);
iwl_pcie_map_non_rx_causes(trans);
}
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
iwl_pcie_conf_msix_hw(trans_pcie);
if (!trans_pcie->msix_enabled)
return;
trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -1128,6 +1245,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(1000, 2000);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
* work. This causes a bug in RF-KILL flows, since the interrupt
* that enables radio won't fire on the correct irq, and the
* driver won't be able to handle the interrupt.
* Configure the IVAR table again after reset.
*/
iwl_pcie_conf_msix_hw(trans_pcie);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
* This is a bug in certain versions of the hardware.
@ -1346,6 +1472,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
int ret;
@ -1358,11 +1485,15 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
/*
* Also enables interrupts - none will happen as the device doesn't
* know we're waking it up, only when the opmode actually tells it
* after this call.
* Reconfigure IVAR table in case of MSIX or reset ict table in
* MSI mode since HW reset erased it.
* Also enables interrupts - none will happen as
* the device doesn't know we're waking it up, only when
* the opmode actually tells it after this call.
*/
iwl_pcie_reset_ict(trans);
iwl_pcie_conf_msix_hw(trans_pcie);
if (!trans_pcie->msix_enabled)
iwl_pcie_reset_ict(trans);
iwl_enable_interrupts(trans);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@ -1405,109 +1536,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
struct iwl_causes_list {
u32 cause_num;
u32 mask_reg;
u8 addr;
};
static struct iwl_causes_list causes_list[] = {
{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
{MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i;
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
iwl_clear_bit(trans, causes_list[i].mask_reg,
causes_list[i].cause_num);
}
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 offset =
trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
u32 val, idx;
/*
* The first RX queue - fallback queue, which is designated for
* management frame, command responses etc, is always mapped to the
* first interrupt vector. The other RX queues are mapped to
* the other (N - 2) interrupt vectors.
*/
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
for (idx = 1; idx < trans->num_rx_queues; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
}
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
val = MSIX_FH_INT_CAUSES_Q(0);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
val |= MSIX_NON_AUTO_CLEAR_CAUSE;
iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
return;
}
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
* Each cause from the causes list above and the RX causes is
* represented as a byte in the IVAR table. The first nibble
* represents the bound interrupt vector of the cause, the second
* represents no auto clear for this cause. This will be set if its
* interrupt vector is bound to serve other causes.
*/
iwl_pcie_map_rx_causes(trans);
iwl_pcie_map_non_rx_causes(trans);
trans_pcie->fh_init_mask =
~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
trans_pcie->hw_init_mask =
~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
@ -1675,6 +1703,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
iwl_pcie_apm_init(trans);
iwl_pcie_init_msix(trans_pcie);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
@ -2953,16 +2982,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
if (cfg->mq_rx_supported)
addr_size = 64;
else
addr_size = 36;
if (cfg->use_tfh) {
addr_size = 64;
trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
} else {
addr_size = 36;
trans_pcie->max_tbs = IWL_NUM_OF_TBS;
trans_pcie->tfd_size = sizeof(struct iwl_tfd);
}


@ -2096,6 +2096,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@ -2145,6 +2146,13 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
*/
skb_pull(skb, hdr_len + iv_len);
/*
* Remove the length of all the headers that we don't actually
* have in the MPDU by themselves, but that we duplicate into
* all the different MSDUs inside the A-MSDU.
*/
le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
tso_start(skb, &tso);
while (total_len) {
@ -2155,7 +2163,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
unsigned int hdr_tb_len;
dma_addr_t hdr_tb_phys;
struct tcphdr *tcph;
u8 *iph;
u8 *iph, *subf_hdrs_start = hdr_page->pos;
total_len -= data_left;
@ -2216,6 +2224,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
/* prepare the start_hdr for the next subframe */
start_hdr = hdr_page->pos;
@ -2408,9 +2418,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_len = len;
}
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
/*
* The first TB points to bi-directional DMA data, we'll
* memcpy the data into it later.
*/
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
@ -2434,6 +2445,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
}
/* building the A-MSDU might have changed this data, so memcpy it now */
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),


@ -27,7 +27,7 @@
#include <linux/timer.h>
#include <linux/ieee80211.h>
#include <uapi/linux/if_arp.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#define MWIFIEX_BSS_COEX_COUNT 2
#define MWIFIEX_MAX_BSS_NUM (3)


@ -1159,8 +1159,6 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
encrypt_key.is_rx_seq_valid = true;
}
} else {
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);


@ -1200,7 +1200,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
out:
/*
* Enable beaconing again.


@ -1349,7 +1349,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
out:
/*
* Enable beaconing again.


@ -1170,7 +1170,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* USB devices cannot blindly pass the skb->len as the


@ -1014,7 +1014,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* Write entire beacon with TXWI and padding to register.


@ -1400,11 +1400,11 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
*/
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
enum rt2x00_dump_type type, struct sk_buff *skb);
enum rt2x00_dump_type type, struct queue_entry *entry);
#else
static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
enum rt2x00_dump_type type,
struct sk_buff *skb)
struct queue_entry *entry)
{
}
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */


@ -157,9 +157,10 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
}
void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
enum rt2x00_dump_type type, struct sk_buff *skb)
enum rt2x00_dump_type type, struct queue_entry *entry)
{
struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
struct sk_buff *skb = entry->skb;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct sk_buff *skbcopy;
struct rt2x00dump_hdr *dump_hdr;
@ -196,8 +197,8 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(type);
dump_hdr->queue_index = skbdesc->entry->queue->qid;
dump_hdr->entry_index = skbdesc->entry->entry_idx;
dump_hdr->queue_index = entry->queue->qid;
dump_hdr->entry_index = entry->entry_idx;
dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);


@ -363,7 +363,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* Send frame to debugfs immediately, after this call is completed
* we are going to overwrite the skb->cb array.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
/*
* Determine if the frame has been successfully transmitted and
@ -772,7 +772,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
*/
rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry);
/*
* Initialize RX status information, and send frame
@ -1436,21 +1436,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
if (rt2x00_is_usb(rt2x00dev)) {
usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
}
#endif
if (rt2x00dev->workqueue)
destroy_workqueue(rt2x00dev->workqueue);
/*
* Free the tx status fifo.
*/
kfifo_free(&rt2x00dev->txstatus_fifo);
/*
* Kill the tx status tasklet.
@ -1466,6 +1451,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
*/
rt2x00lib_uninitialize(rt2x00dev);
if (rt2x00dev->workqueue)
destroy_workqueue(rt2x00dev->workqueue);
/*
* Free the tx status fifo.
*/
kfifo_free(&rt2x00dev->txstatus_fifo);
/*
* Free extra components
*/


@ -83,7 +83,6 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
*/
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = entry;
if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
dma_addr_t skb_dma;
@ -544,7 +543,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
* All processing on the frame has been completed, this means
* it is now ready to be dumped to userspace through debugfs.
*/
rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
@ -689,7 +688,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
goto out;
}
skbdesc->entry = entry;
entry->skb = skb;
/*
@ -774,7 +772,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
skbdesc = get_skb_frame_desc(intf->beacon->skb);
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = intf->beacon;
/*
* Send beacon to hardware.


@ -116,8 +116,6 @@ struct skb_frame_desc {
__le32 iv[2];
dma_addr_t skb_dma;
struct queue_entry *entry;
};
/**


@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);
usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@ -744,6 +740,11 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
queue_for_each(rt2x00dev, queue)
rt2x00usb_free_entries(queue);
}
@ -824,10 +825,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
if (retval)
goto exit_free_device;
retval = rt2x00lib_probe_dev(rt2x00dev);
if (retval)
goto exit_free_reg;
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
@ -835,10 +832,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
retval = -ENOMEM;
goto exit_free_reg;
}
init_usb_anchor(rt2x00dev->anchor);
retval = rt2x00lib_probe_dev(rt2x00dev);
if (retval)
goto exit_free_anchor;
return 0;
exit_free_anchor:
usb_kill_anchored_urbs(rt2x00dev->anchor);
exit_free_reg:
rt2x00usb_free_reg(rt2x00dev);
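
The probe reorder above makes the USB anchor exist before rt2x00lib_probe_dev() can anchor URBs to it, and adds a matching unwind label so those URBs are killed if the core probe fails. A runnable model of the allocate-before-use ordering (all names are illustrative stubs):

#include <stdio.h>

static int alloc_anchor(void)	{ puts("anchor ready"); return 0; }
static int probe_dev(void)	{ puts("probe core"); return -1; }
static void kill_anchored_urbs(void) { puts("kill urbs"); }
static void free_reg(void)	{ puts("free reg"); }

static int probe(void)
{
	int err;

	err = alloc_anchor();	/* resources the core may use ... */
	if (err)
		goto exit_free_reg;

	err = probe_dev();	/* ... must exist before this call */
	if (err)
		goto exit_free_anchor;

	return 0;

exit_free_anchor:
	kill_anchored_urbs();
exit_free_reg:
	free_reg();
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}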


@ -1903,8 +1903,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_read(txd, 5, &word);
rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
skbdesc->entry->entry_idx);
rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
@ -1989,7 +1988,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* Write entire beacon with descriptor and padding to register.


@ -1557,7 +1557,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
/*
* Write entire beacon with descriptor and padding to register.


@ -475,6 +475,8 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
(void *)rtl_swlps_rfon_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
(void *)rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
(void *)rtl_c2hcmd_wq_callback);
}
@ -489,6 +491,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ps_work);
cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
cancel_delayed_work(&rtlpriv->works.fwevt_wq);
cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
}
EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
@ -556,6 +559,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.rf_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.entry_list_lock);
spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
spin_lock_init(&rtlpriv->locks.fw_ps_lock);
@ -563,6 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.iqk_lock);
/* <5> init list */
INIT_LIST_HEAD(&rtlpriv->entry_list);
INIT_LIST_HEAD(&rtlpriv->c2hcmd_list);
rtlmac->link_state = MAC80211_NOLINK;
@ -575,6 +580,7 @@ EXPORT_SYMBOL_GPL(rtl_init_core);
void rtl_deinit_core(struct ieee80211_hw *hw)
{
rtl_c2hcmd_launcher(hw, 0);
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@ -1729,6 +1735,93 @@ void rtl_fwevt_wq_callback(void *data)
rtlpriv->cfg->ops->c2h_command_handle(hw);
}
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned long flags;
struct rtl_c2hcmd *c2hcmd;
c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
if (!c2hcmd)
goto label_err;
c2hcmd->val = kmalloc(len, GFP_KERNEL);
if (!c2hcmd->val)
goto label_err2;
/* fill data */
c2hcmd->tag = tag;
c2hcmd->len = len;
memcpy(c2hcmd->val, val, len);
/* enqueue */
spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list);
spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
/* wake up wq */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0);
return;
label_err2:
kfree(c2hcmd);
label_err:
RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING,
"C2H cmd enqueue fail.\n");
}
EXPORT_SYMBOL(rtl_c2hcmd_enqueue);
void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned long flags;
struct rtl_c2hcmd *c2hcmd;
int i;
for (i = 0; i < 200; i++) {
/* dequeue a task */
spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list,
struct rtl_c2hcmd, list);
if (c2hcmd)
list_del(&c2hcmd->list);
spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
/* do it */
if (!c2hcmd)
break;
if (rtlpriv->cfg->ops->c2h_content_parsing && exec)
rtlpriv->cfg->ops->c2h_content_parsing(hw,
c2hcmd->tag, c2hcmd->len, c2hcmd->val);
/* free */
kfree(c2hcmd->val);
kfree(c2hcmd);
}
}
void rtl_c2hcmd_wq_callback(void *data)
{
struct rtl_works *rtlworks = container_of_dwork_rtl(data,
struct rtl_works,
c2hcmd_wq);
struct ieee80211_hw *hw = rtlworks->hw;
rtl_c2hcmd_launcher(hw, 1);
}
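
The new C2H path defers firmware-event handling: producers append to a spinlock-protected list and kick a workqueue, and the launcher drains at most 200 items per run so it cannot loop forever. A compact user-space model of that bounded drain (a singly linked stack here instead of the driver's tail-add list):

#include <stdio.h>
#include <stdlib.h>

struct item { struct item *next; int tag; };
static struct item *head;

static void enqueue(int tag)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return;		/* the driver logs and drops, too */
	it->tag = tag;
	it->next = head;	/* the driver uses list_add_tail() */
	head = it;
}

static void launcher(void)
{
	for (int i = 0; i < 200; i++) {	/* bounded, like the driver */
		struct item *it = head;

		if (!it)
			break;
		head = it->next;
		printf("handle tag %d\n", it->tag);
		free(it);
	}
}

int main(void)
{
	enqueue(1);
	enqueue(2);
	launcher();
	return 0;
}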
void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
{
struct ieee80211_hw *hw = (struct ieee80211_hw *)data;


@ -136,6 +136,9 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid);
void rtl_watchdog_wq_callback(void *data);
void rtl_fwevt_wq_callback(void *data);
void rtl_c2hcmd_wq_callback(void *data);
void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,


@ -1,4 +1,8 @@
btcoexist-objs := halbtc8723b2ant.o \
btcoexist-objs := halbtc8192e2ant.o \
halbtc8723b1ant.o \
halbtc8723b2ant.o \
halbtc8821a1ant.o \
halbtc8821a2ant.o \
halbtcoutsrc.o \
rtl_btc.o

Diffs for five files are not shown because of their large size.


@ -141,11 +141,40 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
if (rtlphy->current_channel != 0)
chnl = rtlphy->current_channel;
btc_alg_dbg(ALGO_TRACE,
"static halbtc_get_wifi_central_chnl:%d\n", chnl);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"static halbtc_get_wifi_central_chnl:%d\n", chnl);
return chnl;
}
u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
{
return rtlpriv->btcoexist.btc_info.single_ant_path;
}
u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
{
return rtlpriv->btcoexist.btc_info.bt_type;
}
u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
{
u8 num;
if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
num = 2;
else
num = 1;
return num;
}
u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
{
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
return rtlhal->package_type;
}
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
@ -335,6 +364,9 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
case BTC_GET_U4_BT_PATCH_VER:
*u32_tmp = halbtc_get_bt_patch_version(btcoexist);
break;
case BTC_GET_U4_VENDOR:
*u32_tmp = BTC_VENDOR_OTHER;
break;
case BTC_GET_U1_WIFI_DOT11_CHNL:
*u8_tmp = rtlphy->current_channel;
break;


@ -116,18 +116,6 @@ extern u32 btc_dbg_type[];
#define WIFI_P2P_GO_CONNECTED BIT3
#define WIFI_P2P_GC_CONNECTED BIT4
#define btc_alg_dbg(dbgflag, fmt, ...) \
do { \
if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
#define btc_iface_dbg(dbgflag, fmt, ...) \
do { \
if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
#define BTC_RSSI_HIGH(_rssi_) \
((_rssi_ == BTC_RSSI_STATE_HIGH || \
_rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
@ -228,6 +216,7 @@ enum btc_get_type {
BTC_GET_U4_WIFI_FW_VER,
BTC_GET_U4_WIFI_LINK_STATUS,
BTC_GET_U4_BT_PATCH_VER,
BTC_GET_U4_VENDOR,
/* type u1Byte */
BTC_GET_U1_WIFI_DOT11_CHNL,
@ -245,6 +234,12 @@ enum btc_get_type {
BTC_GET_MAX
};
enum btc_vendor {
BTC_VENDOR_LENOVO,
BTC_VENDOR_ASUS,
BTC_VENDOR_OTHER
};
enum btc_set_type {
/* type bool */
BTC_SET_BL_BT_DISABLE,
@ -263,6 +258,7 @@ enum btc_set_type {
/* type trigger some action */
BTC_SET_ACT_GET_BT_RSSI,
BTC_SET_ACT_AGGREGATE_CTRL,
BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
/********* for 1Ant **********/
/* type bool */


@ -178,17 +178,6 @@ struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
}
EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
{
u8 num;
if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
num = 2;
else
num = 1;
return num;
}
enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
{
@ -209,11 +198,6 @@ u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
return rtlpriv->btcoexist.btc_info.btcoexist;
}
u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
{
return rtlpriv->btcoexist.btc_info.bt_type;
}
MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");


@ -46,9 +46,12 @@ void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv);
u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv);
enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
#endif


@ -45,12 +45,13 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
u32 target_command;
u32 target_content = 0;
u8 entry_i;
int entry_i;
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
key_cont_128, 16);
for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
/* 0-1 config + mac, 2-5 fill 128key,6-7 are reserved */
for (entry_i = CAM_CONTENT_COUNT - 1; entry_i >= 0; entry_i--) {
target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
target_command = target_command | BIT(31) | BIT(16);
@ -102,7 +103,6 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
target_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
udelay(100);
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
"WRITE A4: %x\n", target_content);


@ -233,6 +233,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
int err = 0;
u8 retry_limit = 0x30;
if (mac->vif) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@ -271,6 +272,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
retry_limit = 0x07;
break;
case NL80211_IFTYPE_P2P_GO:
mac->p2p = P2P_ROLE_GO;
@ -287,6 +289,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
mac->basic_rates = 0xff0;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
retry_limit = 0x07;
break;
case NL80211_IFTYPE_MESH_POINT:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
@ -300,6 +304,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
mac->basic_rates = 0xff0;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
retry_limit = 0x07;
break;
default:
pr_err("operation mode %d is not supported!\n",
@ -321,6 +327,10 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
mac->retry_long = retry_limit;
mac->retry_short = retry_limit;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
(u8 *)(&retry_limit));
out:
mutex_unlock(&rtlpriv->locks.conf_mutex);
return err;
@ -645,10 +655,15 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
hw->conf.long_frame_max_tx_count);
mac->retry_long = hw->conf.long_frame_max_tx_count;
mac->retry_short = hw->conf.long_frame_max_tx_count;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
/* at bring-up everything changes (changed == ~0), which indicates the
* first open, so use our default value instead of that of wiphy.
*/
if (changed != ~0) {
mac->retry_long = hw->conf.long_frame_max_tx_count;
mac->retry_short = hw->conf.long_frame_max_tx_count;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
(u8 *)(&hw->conf.long_frame_max_tx_count));
}
}
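
The guard above relies on mac80211's convention that changed == ~0 on the first configure call after bring-up, so the driver keeps its own retry default then and only adopts the wiphy value on later changes. A small model of the check (0x30 matches the default used in rtl_op_add_interface above):

#include <stdio.h>

static void handle_config(unsigned int changed, int wiphy_retry)
{
	static int retry_long = 0x30;	/* driver default */

	if (changed != ~0u)		/* ~0 == first open: keep default */
		retry_long = wiphy_retry;
	printf("retry_long=0x%x\n", retry_long);
}

int main(void)
{
	handle_config(~0u, 4);	/* first open: prints 0x30 */
	handle_config(0x4, 4);	/* later change: prints 0x4 */
	return 0;
}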
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&


@ -73,8 +73,6 @@ static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
u8 *targetdata);
static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
u16 efuse_addr, u8 word_en, u8 *data);
static void efuse_power_switch(struct ieee80211_hw *hw, u8 write,
u8 pwrstate);
static u16 efuse_get_current_size(struct ieee80211_hw *hw);
static u8 efuse_calculate_word_cnts(u8 word_en);
@ -1124,7 +1122,7 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
return badworden;
}
static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@ -1210,6 +1208,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
}
}
}
EXPORT_SYMBOL(efuse_power_switch);
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{


@ -109,6 +109,7 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
int max_size, u8 *hwinfo, int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);


@ -1213,6 +1213,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
mac->current_ampdu_density = 7;
mac->current_ampdu_factor = 3;
/*Retry Limit*/
mac->retry_short = 7;
mac->retry_long = 7;
/*QOS*/
rtlpci->acm_method = EACMWAY2_SW;
@ -1813,6 +1817,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
int err;
@ -1830,6 +1835,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
"Failed to config hardware!\n");
return err;
}
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
&rtlmac->retry_long);
rtlpriv->cfg->ops->enable_interrupt(hw);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");


@ -271,10 +271,10 @@ struct mp_adapter {
};
struct rtl_pci_priv {
struct bt_coexist_info bt_coexist;
struct rtl_led_ctl ledctl;
struct rtl_pci dev;
struct mp_adapter ndis_adapter;
struct rtl_led_ctl ledctl;
struct bt_coexist_info bt_coexist;
};
#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))


@ -34,6 +34,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
/*<1> reset trx ring */
if (rtlhal->interface == INTF_PCI)
@ -46,6 +47,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
return false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
&rtlmac->retry_long);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */


@ -817,19 +817,18 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
static void _rtl88ee_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpriv->rtlhal.up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
rtl88ee_sw_led_on(hw, pLed0);
rtl88ee_sw_led_on(hw, pled0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
rtl88ee_sw_led_on(hw, pLed0);
rtl88ee_sw_led_on(hw, pled0);
else
rtl88ee_sw_led_off(hw, pLed0);
rtl88ee_sw_led_off(hw, pled0);
}
static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
@ -1931,14 +1930,13 @@ exit:
static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
pcipriv->ledctl.led_opendrain = true;
rtlpriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:


@ -67,7 +67,6 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@ -79,7 +78,7 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
case LED_PIN_LED0:
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
ledcfg &= 0xf0;
if (pcipriv->ledctl.led_opendrain) {
if (rtlpriv->ledctl.led_opendrain) {
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(3) | BIT(5) | BIT(6)));
ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@ -104,24 +103,26 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl88ee_init_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
_rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
_rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
struct rtl_priv *rtlpriv = rtl_priv(hw);
_rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
_rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl88ee_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
rtl88ee_sw_led_on(hw, pLed0);
rtl88ee_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
rtl88ee_sw_led_off(hw, pLed0);
rtl88ee_sw_led_off(hw, pled0);
break;
default:
break;


@ -638,7 +638,6 @@ EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
static u64 last_txok_cnt;
@ -651,20 +650,20 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
u32 edca_be_dl = 0x5ea42b;
bool bt_change_edca = false;
if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
(last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) ||
(last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) {
rtlpriv->dm.current_turbo_edca = false;
last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul;
last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl;
}
if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
if (rtlpriv->btcoexist.bt_edca_ul != 0) {
edca_be_ul = rtlpriv->btcoexist.bt_edca_ul;
bt_change_edca = true;
}
if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
if (rtlpriv->btcoexist.bt_edca_dl != 0) {
edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
bt_change_edca = true;
}
@ -673,7 +672,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
return;
}
if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
if ((!mac->ht_enable) && (!rtlpriv->btcoexist.bt_coexistence)) {
if (!(edca_be_ul & 0xffff0000))
edca_be_ul |= 0x005e0000;
@ -1471,7 +1470,6 @@ EXPORT_SYMBOL(rtl92c_dm_watchdog);
u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
long undec_sm_pwdb;
u8 curr_bt_rssi_state = 0x00;
@ -1510,8 +1508,8 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
if (curr_bt_rssi_state != rtlpriv->btcoexist.bt_rssi_state) {
rtlpriv->btcoexist.bt_rssi_state = curr_bt_rssi_state;
return true;
} else {
return false;
@ -1522,7 +1520,6 @@ EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);
static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u32 polling, ratio_tx, ratio_pri;
u32 bt_tx, bt_pri;
@ -1542,14 +1539,14 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
return false;
bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
rtlpcipriv->bt_coexist.bt_cur_state = bt_state;
if (bt_state != rtlpriv->btcoexist.bt_cur_state) {
rtlpriv->btcoexist.bt_cur_state = bt_state;
if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
if (rtlpriv->btcoexist.reg_bt_sco == 3) {
rtlpriv->btcoexist.bt_service = BT_IDLE;
bt_state = bt_state |
((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
BIT_OFFSET_LEN_MASK_32(2, 1);
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
@ -1559,10 +1556,10 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
ratio_tx = bt_tx * 1000 / polling;
ratio_pri = bt_pri * 1000 / polling;
rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;
rtlpriv->btcoexist.ratio_tx = ratio_tx;
rtlpriv->btcoexist.ratio_pri = ratio_pri;
if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
if (bt_state && rtlpriv->btcoexist.reg_bt_sco == 3) {
if ((ratio_tx < 30) && (ratio_pri < 30))
cur_service_type = BT_IDLE;
@ -1577,17 +1574,17 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
else
cur_service_type = BT_OTHER_ACTION;
if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
rtlpcipriv->bt_coexist.bt_service = cur_service_type;
if (cur_service_type != rtlpriv->btcoexist.bt_service) {
rtlpriv->btcoexist.bt_service = cur_service_type;
bt_state = bt_state |
((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
((rtlpriv->btcoexist.bt_service != BT_IDLE) ?
0 : BIT_OFFSET_LEN_MASK_32(2, 1));
/* Add interrupt migration when bt is not in
* idle state (no traffic). */
if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
rtl_write_word(rtlpriv, 0x504, 0x0ccc);
rtl_write_byte(rtlpriv, 0x506, 0x54);
rtl_write_byte(rtlpriv, 0x507, 0x54);
@ -1626,80 +1623,77 @@ static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
} else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
} else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) {
if (rtlpcipriv->bt_coexist.ratio_tx > 160) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
if (rtlpriv->btcoexist.bt_service == BT_OTHERBUSY) {
rtlpriv->btcoexist.bt_edca_ul = 0x5ea72b;
rtlpriv->btcoexist.bt_edca_dl = 0x5ea72b;
} else if (rtlpriv->btcoexist.bt_service == BT_BUSY) {
rtlpriv->btcoexist.bt_edca_ul = 0x5eb82f;
rtlpriv->btcoexist.bt_edca_dl = 0x5eb82f;
} else if (rtlpriv->btcoexist.bt_service == BT_SCO) {
if (rtlpriv->btcoexist.ratio_tx > 160) {
rtlpriv->btcoexist.bt_edca_ul = 0x5ea72f;
rtlpriv->btcoexist.bt_edca_dl = 0x5ea72f;
} else {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
rtlpriv->btcoexist.bt_edca_ul = 0x5ea32b;
rtlpriv->btcoexist.bt_edca_dl = 0x5ea42b;
}
} else {
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
rtlpriv->btcoexist.bt_edca_ul = 0;
rtlpriv->btcoexist.bt_edca_dl = 0;
}
if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) &&
(rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
if ((rtlpriv->btcoexist.bt_service != BT_IDLE) &&
(rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
(rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
(rtlpcipriv->bt_coexist.bt_rssi_state &
(rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_BG_EDCA_LOW)) {
rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
rtlpriv->btcoexist.bt_edca_ul = 0x5eb82b;
rtlpriv->btcoexist.bt_edca_dl = 0x5eb82b;
}
}
static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
/* Only enable HW BT coexist when BT in "Busy" state. */
if (rtlpriv->mac80211.vendor == PEER_CISCO &&
rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else {
if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
(rtlpcipriv->bt_coexist.bt_rssi_state &
if ((rtlpriv->btcoexist.bt_service == BT_BUSY) &&
(rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_NORMAL_POWER)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else if ((rtlpcipriv->bt_coexist.bt_service ==
} else if ((rtlpriv->btcoexist.bt_service ==
BT_OTHER_ACTION) && (rtlpriv->mac80211.mode <
WIRELESS_MODE_N_24G) &&
(rtlpcipriv->bt_coexist.bt_rssi_state &
(rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_SPECIAL_LOW)) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
} else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
} else if (rtlpriv->btcoexist.bt_service == BT_PAN) {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
}
}
if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
if (rtlpriv->btcoexist.bt_service == BT_PAN)
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
else
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);
if (rtlpcipriv->bt_coexist.bt_rssi_state &
if (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_NORMAL_POWER) {
rtl92c_bt_set_normal(hw);
} else {
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
rtlpriv->btcoexist.bt_edca_ul = 0;
rtlpriv->btcoexist.bt_edca_dl = 0;
}
if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
rtlpriv->cfg->ops->set_rfreg(hw,
RF90_PATH_A,
0x1e,
@ -1707,12 +1701,12 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
} else {
rtlpriv->cfg->ops->set_rfreg(hw,
RF90_PATH_A, 0x1e, 0xf0,
rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
rtlpriv->btcoexist.bt_rfreg_origin_1e);
}
if (!rtlpriv->dm.dynamic_txpower_enable) {
if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
if (rtlpcipriv->bt_coexist.bt_rssi_state &
if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
if (rtlpriv->btcoexist.bt_rssi_state &
BT_RSSI_STATE_TXPOWER_LOW) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_BT2;
@@ -1732,37 +1726,34 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 tmp1byte = 0;
if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version) &&
rtlpcipriv->bt_coexist.bt_coexistence)
rtlpriv->btcoexist.bt_coexistence)
tmp1byte |= BIT(5);
if (rtlpcipriv->bt_coexist.bt_cur_state) {
if (rtlpcipriv->bt_coexist.bt_ant_isolation)
if (rtlpriv->btcoexist.bt_cur_state) {
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl92c_bt_ant_isolation(hw, tmp1byte);
} else {
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
rtlpriv->btcoexist.bt_rfreg_origin_1e);
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
rtlpriv->btcoexist.bt_edca_ul = 0;
rtlpriv->btcoexist.bt_edca_dl = 0;
}
}
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
bool wifi_connect_change;
bool bt_state_change;
bool rssi_state_change;
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) {
wifi_connect_change = rtl92c_bt_wifi_connect_change(hw);
bt_state_change = rtl92c_bt_state_change(hw);
rssi_state_change = rtl92c_bt_rssi_state_change(hw);

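The change repeated through the rtl8192c hunks above is mechanical: the Bluetooth-coexistence state moves out of the bus-specific private data (rtlpcipriv->bt_coexist, reached via rtl_pcipriv(hw)) into the bus-agnostic rtlpriv->btcoexist, so common rtl8192c coexistence logic no longer has to assume a PCI device. A minimal sketch of what the move enables, using only identifiers visible in this diff; the helper name rtl92c_bt_is_bc4 is hypothetical, not something this commit adds:

/* Sketch only: with btcoexist in struct rtl_priv, a predicate like this
 * compiles for both the PCI (rtl8192ce) and USB (rtl8192cu) drivers,
 * which was impossible while the state lived in rtl_pci_priv. */
static bool rtl92c_bt_is_bc4(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	return rtlpriv->btcoexist.bt_coexistence &&
	       rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4;
}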

@@ -148,7 +148,6 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -276,8 +275,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 *p_regtoset = NULL;
u8 index = 0;
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type ==
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type ==
BT_CSR_BC4))
p_regtoset = regtoset_bt;
else
@@ -655,26 +654,25 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpci->up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
rtl92ce_sw_led_on(hw, pLed0);
rtl92ce_sw_led_on(hw, pled0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
rtl92ce_sw_led_on(hw, pLed0);
rtl92ce_sw_led_on(hw, pled0);
else
rtl92ce_sw_led_off(hw, pLed0);
rtl92ce_sw_led_off(hw, pled0);
}
static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -683,7 +681,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
u16 retry;
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
if (rtlpcipriv->bt_coexist.bt_coexistence) {
if (rtlpriv->btcoexist.bt_coexistence) {
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO);
value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK);
@@ -692,7 +690,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
if (rtlpcipriv->bt_coexist.bt_coexistence) {
if (rtlpriv->btcoexist.bt_coexistence) {
u32 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
u4b_tmp &= (~0x00024800);
@@ -726,7 +724,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
udelay(2);
if (rtlpcipriv->bt_coexist.bt_coexistence) {
if (rtlpriv->btcoexist.bt_coexistence) {
bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2) & 0xfd;
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, bytetmp);
}
@@ -798,7 +796,6 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u8 reg_bw_opmode;
u32 reg_prsr;
@@ -828,8 +825,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4))
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431);
else
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
@@ -848,8 +845,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) {
rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402);
} else {
@@ -857,8 +854,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
}
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4))
rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
else
rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
@@ -1313,7 +1310,6 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 u1b_tmp;
u32 u4b_tmp;
@@ -1331,9 +1327,9 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8))) {
if ((rtlpriv->btcoexist.bt_coexistence) &&
((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8))) {
rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00F30000 |
(u1b_tmp << 8));
} else {
@@ -1345,7 +1341,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
if (!IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
if (rtlpcipriv->bt_coexist.bt_coexistence) {
if (rtlpriv->btcoexist.bt_coexistence) {
u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
u4b_tmp |= 0x03824800;
rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp);
@@ -1724,12 +1720,11 @@ exit:
static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
@@ -1782,7 +1777,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1838,12 +1832,12 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
break;
}
if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
(rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
(rtlpcipriv->bt_coexist.bt_cur_state) &&
(rtlpcipriv->bt_coexist.bt_ant_isolation) &&
((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ||
(rtlpcipriv->bt_coexist.bt_service == BT_BUSY)))
if ((rtlpriv->btcoexist.bt_coexistence) &&
(rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
(rtlpriv->btcoexist.bt_cur_state) &&
(rtlpriv->btcoexist.bt_ant_isolation) &&
((rtlpriv->btcoexist.bt_service == BT_SCO) ||
(rtlpriv->btcoexist.bt_service == BT_BUSY)))
ratr_value &= 0x0fffcfc0;
else
ratr_value &= 0x0FFFFFFF;
@@ -2237,65 +2231,64 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpcipriv->bt_coexist.bt_coexistence =
rtlpcipriv->bt_coexist.eeprom_bt_coexist;
rtlpcipriv->bt_coexist.bt_ant_num =
rtlpcipriv->bt_coexist.eeprom_bt_ant_num;
rtlpcipriv->bt_coexist.bt_coexist_type =
rtlpcipriv->bt_coexist.eeprom_bt_type;
rtlpriv->btcoexist.bt_coexistence =
rtlpriv->btcoexist.eeprom_bt_coexist;
rtlpriv->btcoexist.bt_ant_num =
rtlpriv->btcoexist.eeprom_bt_ant_num;
rtlpriv->btcoexist.bt_coexist_type =
rtlpriv->btcoexist.eeprom_bt_type;
if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
rtlpcipriv->bt_coexist.bt_ant_isolation =
rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
if (rtlpriv->btcoexist.reg_bt_iso == 2)
rtlpriv->btcoexist.bt_ant_isolation =
rtlpriv->btcoexist.eeprom_bt_ant_isol;
else
rtlpcipriv->bt_coexist.bt_ant_isolation =
rtlpcipriv->bt_coexist.reg_bt_iso;
rtlpriv->btcoexist.bt_ant_isolation =
rtlpriv->btcoexist.reg_bt_iso;
rtlpcipriv->bt_coexist.bt_radio_shared_type =
rtlpcipriv->bt_coexist.eeprom_bt_radio_shared;
rtlpriv->btcoexist.bt_radio_shared_type =
rtlpriv->btcoexist.eeprom_bt_radio_shared;
if (rtlpcipriv->bt_coexist.bt_coexistence) {
if (rtlpcipriv->bt_coexist.reg_bt_sco == 1)
rtlpcipriv->bt_coexist.bt_service = BT_OTHER_ACTION;
else if (rtlpcipriv->bt_coexist.reg_bt_sco == 2)
rtlpcipriv->bt_coexist.bt_service = BT_SCO;
else if (rtlpcipriv->bt_coexist.reg_bt_sco == 4)
rtlpcipriv->bt_coexist.bt_service = BT_BUSY;
else if (rtlpcipriv->bt_coexist.reg_bt_sco == 5)
rtlpcipriv->bt_coexist.bt_service = BT_OTHERBUSY;
if (rtlpriv->btcoexist.bt_coexistence) {
if (rtlpriv->btcoexist.reg_bt_sco == 1)
rtlpriv->btcoexist.bt_service = BT_OTHER_ACTION;
else if (rtlpriv->btcoexist.reg_bt_sco == 2)
rtlpriv->btcoexist.bt_service = BT_SCO;
else if (rtlpriv->btcoexist.reg_bt_sco == 4)
rtlpriv->btcoexist.bt_service = BT_BUSY;
else if (rtlpriv->btcoexist.reg_bt_sco == 5)
rtlpriv->btcoexist.bt_service = BT_OTHERBUSY;
else
rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
rtlpriv->btcoexist.bt_service = BT_IDLE;
rtlpcipriv->bt_coexist.bt_edca_ul = 0;
rtlpcipriv->bt_coexist.bt_edca_dl = 0;
rtlpcipriv->bt_coexist.bt_rssi_state = 0xff;
rtlpriv->btcoexist.bt_edca_ul = 0;
rtlpriv->btcoexist.bt_edca_dl = 0;
rtlpriv->btcoexist.bt_rssi_state = 0xff;
}
}
void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
bool auto_load_fail, u8 *hwinfo)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 val;
if (!auto_load_fail) {
rtlpcipriv->bt_coexist.eeprom_bt_coexist =
rtlpriv->btcoexist.eeprom_bt_coexist =
((hwinfo[RF_OPTION1] & 0xe0) >> 5);
val = hwinfo[RF_OPTION4];
rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1);
rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1);
rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
rtlpriv->btcoexist.eeprom_bt_type = ((val & 0xe) >> 1);
rtlpriv->btcoexist.eeprom_bt_ant_num = (val & 0x1);
rtlpriv->btcoexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
rtlpriv->btcoexist.eeprom_bt_radio_shared =
((val & 0x20) >> 5);
} else {
rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
rtlpriv->btcoexist.eeprom_bt_coexist = 0;
rtlpriv->btcoexist.eeprom_bt_type = BT_2WIRE;
rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2;
rtlpriv->btcoexist.eeprom_bt_ant_isol = 0;
rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
}
rtl8192ce_bt_var_init(hw);
@@ -2303,14 +2296,14 @@ void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* 0:Low, 1:High, 2:From Efuse. */
rtlpcipriv->bt_coexist.reg_bt_iso = 2;
rtlpriv->btcoexist.reg_bt_iso = 2;
/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
rtlpcipriv->bt_coexist.reg_bt_sco = 3;
rtlpriv->btcoexist.reg_bt_sco = 3;
/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
rtlpcipriv->bt_coexist.reg_bt_sco = 0;
rtlpriv->btcoexist.reg_bt_sco = 0;
}
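One quirk worth flagging in the hunk just above: both initializers in rtl8192ce_bt_reg_init() write the same field, so the 3 ("From Counter") stored in reg_bt_sco is immediately clobbered by the 0 from the A-MPDU line. The rename preserves this long-standing behavior rather than fixing it. A hypothetical probe, not part of the commit, makes the net effect concrete:

/* Hypothetical probe, not kernel code: after rtl8192ce_bt_reg_init()
 * the SCO selector reads back as 0, because the A-MPDU assignment
 * above reuses reg_bt_sco and overwrites the earlier 3. */
static u8 rtl92c_bt_reg_sco_after_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl8192ce_bt_reg_init(hw);
	return rtlpriv->btcoexist.reg_bt_sco;	/* 0, not 3 */
}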
@@ -2318,23 +2311,22 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u8 u1_tmp;
if (rtlpcipriv->bt_coexist.bt_coexistence &&
((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8)) {
if (rtlpriv->btcoexist.bt_coexistence &&
((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8)) {
if (rtlpcipriv->bt_coexist.bt_ant_isolation)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
BIT_OFFSET_LEN_MASK_32(0, 1);
u1_tmp = u1_tmp |
((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ?
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
0 : BIT_OFFSET_LEN_MASK_32(2, 1));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);

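For reference, rtl8192ce_read_bt_coexist_info_from_hwpg() above unpacks five coexistence parameters from two EEPROM bytes. A worked example with a made-up hwinfo[RF_OPTION4] value of 0x35 shows how the masks fall out (standalone C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned char val = 0x35;	/* hypothetical EEPROM byte */

	printf("eeprom_bt_type         = %d\n", (val & 0x0e) >> 1);	/* 2 */
	printf("eeprom_bt_ant_num      = %d\n", val & 0x01);		/* 1 */
	printf("eeprom_bt_ant_isol     = %d\n", (val & 0x10) >> 4);	/* 1 */
	printf("eeprom_bt_radio_shared = %d\n", (val & 0x20) >> 5);	/* 1 */
	return 0;
}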

@@ -67,7 +67,6 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -80,7 +79,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
if (pcipriv->ledctl.led_opendrain)
if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -100,24 +99,26 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
_rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
_rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
struct rtl_priv *rtlpriv = rtl_priv(hw);
_rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
_rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
rtl92ce_sw_led_on(hw, pLed0);
rtl92ce_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
rtl92ce_sw_led_off(hw, pLed0);
rtl92ce_sw_led_off(hw, pled0);
break;
default:
break;

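The LED hunks apply the same treatment to ledctl: sw_led0/sw_led1 and the led_opendrain quirk move from the PCI- and USB-private structures into rtl_priv, so the HP OEM flag set in the customized-behavior hunks and the LED-off path consult the same field regardless of bus. A sketch of the now-shareable LED0-off logic; the helper name is hypothetical, and the non-open-drain write is assumed from the surrounding kernel code since this diff truncates that branch:

static void rtl92c_sw_led0_off(struct ieee80211_hw *hw, u8 ledcfg)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	ledcfg &= 0xf0;
	if (rtlpriv->ledctl.led_opendrain)
		/* Open-drain outputs (HP boards) need BIT(1) as well. */
		rtl_write_byte(rtlpriv, REG_LEDCFG2,
			       ledcfg | BIT(1) | BIT(5) | BIT(6));
	else
		rtl_write_byte(rtlpriv, REG_LEDCFG2,
			       ledcfg | BIT(3) | BIT(5) | BIT(6));
}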

@@ -393,12 +393,11 @@ exit:
static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
case RT_CID_819X_HP:
usb_priv->ledctl.led_opendrain = true;
rtlpriv->ledctl.led_opendrain = true;
break;
case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:


@@ -67,7 +67,6 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -78,7 +77,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
if (usbpriv->ledctl.led_opendrain)
if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -99,16 +98,18 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
_rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
_rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
struct rtl_priv *rtlpriv = rtl_priv(hw);
_rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
_rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
_rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
_rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
struct rtl_priv *rtlpriv = rtl_priv(hw);
_rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led0);
_rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led1);
}
static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,

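With ledctl shared, the rtl8192cu hunks above drop the rtl_usb_priv plumbing entirely; init and deinit now touch only rtl_priv, mirroring the PCI side line for line. Callers are unaffected, as this sketch of the unchanged entry-point sequence shows (the wrapper function is illustrative, not from the commit):

static void rtl92cu_led_lifecycle(struct ieee80211_hw *hw)
{
	rtl92cu_init_sw_leds(hw);	/* populates rtlpriv->ledctl.sw_led0/1 */
	/* ... normal operation; _rtl92cu_sw_led_control() reacts to events ... */
	rtl92cu_deinit_sw_leds(hw);	/* releases the same LED state */
}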

@@ -614,19 +614,19 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
static void _rtl92de_gen_refresh_led_state(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
if (rtlpci->up_first_time)
return;
if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
rtl92de_sw_led_on(hw, pLed0);
rtl92de_sw_led_on(hw, pled0);
else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
rtl92de_sw_led_on(hw, pLed0);
rtl92de_sw_led_on(hw, pled0);
else
rtl92de_sw_led_off(hw, pLed0);
rtl92de_sw_led_off(hw, pled0);
}
static bool _rtl92de_init_mac(struct ieee80211_hw *hw)


@@ -76,7 +76,6 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -89,7 +88,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
if (pcipriv->ledctl.led_opendrain)
if (rtlpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
@@ -110,24 +109,26 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
void rtl92de_init_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
_rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
_rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
struct rtl_priv *rtlpriv = rtl_priv(hw);
_rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
_rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
}
static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
rtl92de_sw_led_on(hw, pLed0);
rtl92de_sw_led_on(hw, pled0);
break;
case LED_CTL_POWER_OFF:
rtl92de_sw_led_off(hw, pLed0);
rtl92de_sw_led_off(hw, pled0);
break;
default:
break;

Some files were not shown because too many files changed in this diff.